mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-11 00:08:50 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2020-12-10

The following pull-request contains BPF updates for your *net* tree.

We've added 21 non-merge commits during the last 12 day(s) which contain
a total of 21 files changed, 163 insertions(+), 88 deletions(-).

The main changes are:

1) Fix propagation of 32-bit signed bounds from 64-bit bounds, from Alexei.

2) Fix ring_buffer__poll() return value, from Andrii.

3) Fix race in lwt_bpf, from Cong.

4) Fix test_offload, from Toke.

5) Various xsk fixes.

Please consider pulling these changes from:

  git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git

Thanks a lot!

Also thanks to reporters, reviewers and testers of commits in this
pull-request:

Cong Wang, Hulk Robot, Jakub Kicinski, Jean-Philippe Brucker, John
Fastabend, Magnus Karlsson, Maxim Mikityanskiy, Yonghong Song
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit d9838b1d39

@@ -3239,7 +3239,7 @@ R:	Martin KaFai Lau <kafai@fb.com>
 R:	Song Liu <songliubraving@fb.com>
 R:	Yonghong Song <yhs@fb.com>
 R:	John Fastabend <john.fastabend@gmail.com>
-R:	KP Singh <kpsingh@chromium.org>
+R:	KP Singh <kpsingh@kernel.org>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Supported

@@ -3358,7 +3358,7 @@ F:	arch/x86/net/
 X:	arch/x86/net/bpf_jit_comp32.c
 
 BPF LSM (Security Audit and Enforcement using BPF)
-M:	KP Singh <kpsingh@chromium.org>
+M:	KP Singh <kpsingh@kernel.org>
 R:	Florent Revest <revest@chromium.org>
 R:	Brendan Jackman <jackmanb@chromium.org>
 L:	bpf@vger.kernel.org

@@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
 	struct nfp_net_dp *dp;
 	int err;
 
-	if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
-		return -EBUSY;
-
 	if (!prog == !nn->dp.xdp_prog) {
 		WRITE_ONCE(nn->dp.xdp_prog, prog);
 		xdp_attachment_setup(&nn->xdp, bpf);

@@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
 	int err;
 
-	if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
-		return -EBUSY;
-
 	err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
 	if (err)
 		return err;

@@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
 	if (!priv->xdpi.prog && !prog)
 		return 0;
 
-	if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
-		return -EBUSY;
-
 	WRITE_ONCE(priv->xdp_prog, prog);
 
 	xdp_attachment_setup(&priv->xdpi, bpf);

@@ -63,15 +63,20 @@ static int
 nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
 {
 	struct nsim_bpf_bound_prog *state;
+	int ret = 0;
 
 	state = env->prog->aux->offload->dev_priv;
 	if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
 		msleep(state->nsim_dev->bpf_bind_verifier_delay);
 
-	if (insn_idx == env->prog->len - 1)
+	if (insn_idx == env->prog->len - 1) {
 		pr_vlog(env, "Hello from netdevsim!\n");
 
-	return 0;
+		if (!state->nsim_dev->bpf_bind_verifier_accept)
+			ret = -EOPNOTSUPP;
+	}
+
+	return ret;
 }
 
 static int nsim_bpf_finalize(struct bpf_verifier_env *env)

@@ -190,9 +195,6 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
 {
 	int err;
 
-	if (!xdp_attachment_flags_ok(xdp, bpf))
-		return -EBUSY;
-
 	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
 		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
 		return -EOPNOTSUPP;

@@ -598,6 +600,9 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
 			    &nsim_dev->bpf_bind_accept);
 	debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
 			   &nsim_dev->bpf_bind_verifier_delay);
+	nsim_dev->bpf_bind_verifier_accept = true;
+	debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
+			    &nsim_dev->bpf_bind_verifier_accept);
 	return 0;
 }
 

@@ -189,6 +189,7 @@ struct nsim_dev {
 	struct dentry *take_snapshot;
 	struct bpf_offload_dev *bpf_dev;
 	bool bpf_bind_accept;
+	bool bpf_bind_verifier_accept;
 	u32 bpf_bind_verifier_delay;
 	struct dentry *ddir_bpf_bound_progs;
 	u32 prog_id_gen;

@@ -240,8 +240,6 @@ struct xdp_attachment_info {
 };
 
 struct netdev_bpf;
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
-			     struct netdev_bpf *bpf);
 void xdp_attachment_setup(struct xdp_attachment_info *info,
 			  struct netdev_bpf *bpf);
 

@@ -1298,9 +1298,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
 
 static bool __reg64_bound_s32(s64 a)
 {
-	if (a > S32_MIN && a < S32_MAX)
-		return true;
-	return false;
+	return a > S32_MIN && a < S32_MAX;
 }
 
 static bool __reg64_bound_u32(u64 a)

@@ -1314,10 +1312,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
 {
 	__mark_reg32_unbounded(reg);
 
-	if (__reg64_bound_s32(reg->smin_value))
+	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
 		reg->s32_min_value = (s32)reg->smin_value;
-	if (__reg64_bound_s32(reg->smax_value))
 		reg->s32_max_value = (s32)reg->smax_value;
+	}
 	if (__reg64_bound_u32(reg->umin_value))
 		reg->u32_min_value = (u32)reg->umin_value;
 	if (__reg64_bound_u32(reg->umax_value))

@@ -4895,6 +4893,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
 
 	ret_reg->smax_value = meta->msize_max_value;
+	ret_reg->s32_max_value = meta->msize_max_value;
 	ret_reg->smin_value = -MAX_ERRNO;
+	ret_reg->s32_min_value = -MAX_ERRNO;
 	__reg_deduce_bounds(ret_reg);
 	__reg_bound_offset(ret_reg);
 	__update_reg_bounds(ret_reg);

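The verifier hunks above carry the headline fix of this pull: a 64-bit range may be narrowed into a 32-bit range only when both endpoints survive the truncation. A minimal standalone sketch of the hazard (hypothetical bounds_demo.c, plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* mirrors __reg64_bound_s32(): strict comparison against the s32 limits */
static int bound_s32(int64_t a)
{
	return a > INT32_MIN && a < INT32_MAX;
}

int main(void)
{
	int64_t smin = 1;              /* fits in s32 */
	int64_t smax = 0x100000001LL;  /* does not fit; truncates back to 1 */

	/* Old logic took each bound independently: here only smin fits, so a
	 * 32-bit lower bound would be recorded while the upper bound, which
	 * truncates to (s32)1, is silently wrong. */
	printf("(s32)smax truncates to %d\n", (int)(int32_t)smax);

	/* Fixed logic: propagate only when the whole range survives. */
	if (bound_s32(smin) && bound_s32(smax))
		printf("both bounds fit, safe to narrow\n");
	else
		printf("range does not fit in 32 bits, leave s32 bounds unbounded\n");
	return 0;
}
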
@@ -8917,6 +8917,17 @@ static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
 	return dev->xdp_state[mode].prog;
 }
 
+static u8 dev_xdp_prog_count(struct net_device *dev)
+{
+	u8 count = 0;
+	int i;
+
+	for (i = 0; i < __MAX_XDP_MODE; i++)
+		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
+			count++;
+	return count;
+}
+
 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
 {
 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);

@@ -9007,6 +9018,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
 			  struct bpf_prog *old_prog, u32 flags)
 {
+	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
 	struct bpf_prog *cur_prog;
 	enum bpf_xdp_mode mode;
 	bpf_op_t bpf_op;

@@ -9022,11 +9034,17 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
 		return -EINVAL;
 	}
-	/* just one XDP mode bit should be set, zero defaults to SKB mode */
-	if (hweight32(flags & XDP_FLAGS_MODES) > 1) {
+	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
+	if (num_modes > 1) {
 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
 		return -EINVAL;
 	}
+	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
+	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
+		NL_SET_ERR_MSG(extack,
+			       "More than one program loaded, unset mode is ambiguous");
+		return -EINVAL;
+	}
 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");

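dev_xdp_attach() above counts the requested attach modes with hweight32(), a population count, and with no mode bit set it now refuses to guess when more than one program is attached. A hedged userspace sketch of the same validation (hypothetical flags_demo.c; __builtin_popcount stands in for the kernel's hweight32(), and the flag values are illustrative):

#include <stdio.h>

#define XDP_FLAGS_SKB_MODE (1U << 1)
#define XDP_FLAGS_DRV_MODE (1U << 2)
#define XDP_FLAGS_HW_MODE  (1U << 3)
#define XDP_FLAGS_MODES    (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)

int main(void)
{
	unsigned int flags = XDP_FLAGS_SKB_MODE | XDP_FLAGS_HW_MODE;
	unsigned int num_modes = __builtin_popcount(flags & XDP_FLAGS_MODES);

	if (num_modes > 1)
		printf("rejected: only one XDP mode flag can be set\n");
	else if (!num_modes)
		printf("no mode given: ambiguous if several programs are attached\n");
	else
		printf("ok: exactly one mode selected\n");
	return 0;
}
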
@@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 {
 	int ret;
 
-	/* Preempt disable is needed to protect per-cpu redirect_info between
-	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
-	 * access to maps strictly require a rcu_read_lock() for protection,
-	 * mixing with BH RCU lock doesn't work.
+	/* Migration disable and BH disable are needed to protect per-cpu
+	 * redirect_info between BPF prog and skb_do_redirect().
 	 */
-	preempt_disable();
+	migrate_disable();
+	local_bh_disable();
 	bpf_compute_data_pointers(skb);
 	ret = bpf_prog_run_save_cb(lwt->prog, skb);
 

@@ -78,7 +77,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 		break;
 	}
 
-	preempt_enable();
+	local_bh_enable();
+	migrate_enable();
 
 	return ret;
 }

@@ -335,11 +335,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * scenarios (e.g. queue full), it is possible to return the xdp_frame
  * while still leveraging this protection. The @napi_direct boolean
  * is used for those calls sites. Thus, allowing for faster recycling
- * of xdp_frames/pages in those cases. This path is never used by the
- * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
- * the switch-statement.
+ * of xdp_frames/pages in those cases.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+			 struct xdp_buff *xdp)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;

@@ -361,6 +360,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 		page = virt_to_page(data); /* Assumes order0 page*/
 		put_page(page);
 		break;
+	case MEM_TYPE_XSK_BUFF_POOL:
+		/* NB! Only valid from an xdp_buff! */
+		xsk_buff_free(xdp);
+		break;
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
 		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);

@@ -370,19 +373,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false);
+	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true);
+	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
 }
 
 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */

@@ -400,18 +403,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
 }
 EXPORT_SYMBOL_GPL(__xdp_release_frame);
 
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
-			     struct netdev_bpf *bpf)
-{
-	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
-		NL_SET_ERR_MSG(bpf->extack,
-			       "program loaded with different flags");
-		return false;
-	}
-	return true;
-}
-EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
-
 void xdp_attachment_setup(struct xdp_attachment_info *info,
 			  struct netdev_bpf *bpf)
 {

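The deleted xdp_attachment_flags_ok() relied on a common XOR-mask idiom: (old ^ new) & MASK is non-zero exactly when some bit under MASK changed, i.e. the attach mode differs; that bookkeeping now lives in the core dev_xdp_attach() path. A tiny standalone illustration (hypothetical xor_mask_demo.c, values invented for the example):

#include <stdio.h>

#define MODE_MASK 0x0eU /* illustrative stand-in for XDP_FLAGS_MODES */

int main(void)
{
	unsigned int old_flags = 0x02; /* e.g. skb mode */
	unsigned int new_flags = 0x04; /* e.g. drv mode */

	/* non-zero iff a bit inside MODE_MASK differs between the two */
	if ((old_flags ^ new_flags) & MODE_MASK)
		printf("mode bits differ: the old helper would have rejected this\n");
	else
		printf("same mode: attach allowed\n");
	return 0;
}
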
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
 	return 0;
 }
 
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+		return false;
+
+	return true;
+}
+
 static bool xsk_is_bound(struct xdp_sock *xs)
 {
 	if (READ_ONCE(xs->state) == XSK_BOUND) {

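xsk_tx_writeable() above gates sk_write_space()/EPOLLOUT on the TX ring being at most half full, so a socket whose ring is about to fill is no longer reported writeable. A runnable userspace sketch of the same arithmetic (hypothetical writeable_demo.c):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t producer;
	uint32_t consumer;
	uint32_t nentries;
};

static uint32_t present_entries(const struct ring *q)
{
	return q->producer - q->consumer; /* unsigned math survives wrap */
}

static bool tx_writeable(const struct ring *tx)
{
	return present_entries(tx) <= tx->nentries / 2;
}

int main(void)
{
	struct ring tx = { .producer = 900, .consumer = 200, .nentries = 1024 };

	/* 700 of 1024 entries outstanding: more than half, not writeable */
	printf("writeable: %s\n", tx_writeable(&tx) ? "yes" : "no");
	tx.consumer = 500; /* consumer catches up, 400 outstanding */
	printf("writeable: %s\n", tx_writeable(&tx) ? "yes" : "no");
	return 0;
}
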
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 		__xskq_cons_release(xs->tx);
-		xs->sk.sk_write_space(&xs->sk);
+		if (xsk_tx_writeable(xs))
+			xs->sk.sk_write_space(&xs->sk);
 	}
 	rcu_read_unlock();
 }

@@ -436,7 +445,8 @@ static int xsk_generic_xmit(struct sock *sk)
 
 out:
 	if (sent_frame)
-		sk->sk_write_space(sk);
+		if (xsk_tx_writeable(xs))
+			sk->sk_write_space(sk);
 
 	mutex_unlock(&xs->mutex);
 	return err;

@@ -471,11 +481,13 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 static __poll_t xsk_poll(struct file *file, struct socket *sock,
 			 struct poll_table_struct *wait)
 {
-	__poll_t mask = datagram_poll(file, sock, wait);
+	__poll_t mask = 0;
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct xsk_buff_pool *pool;
 
+	sock_poll_wait(file, sock, wait);
+
 	if (unlikely(!xsk_is_bound(xs)))
 		return mask;
 

@@ -491,7 +503,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (xs->tx && !xskq_cons_is_full(xs->tx))
+	if (xs->tx && xsk_tx_writeable(xs))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	return mask;

@@ -175,6 +175,7 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
 
 	if (!pool->dma_pages) {
 		WARN(1, "Driver did not DMA map zero-copy buffers");
+		err = -EINVAL;
 		goto err_unreg_xsk;
 	}
 	pool->umem->zc = true;

@@ -264,6 +264,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
 		q->nentries;
 }
 
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
 /* Functions for producers */
 
 static inline bool xskq_prod_is_full(struct xsk_queue *q)

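xskq_cons_present_entries() above subtracts two free-running u32 counters. A quick demonstration (hypothetical wrap_demo.c) that the unsigned subtraction stays correct across the 2^32 wrap, as long as fewer than 2^32 entries are ever outstanding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t consumer = UINT32_MAX - 2; /* about to wrap */
	uint32_t producer = consumer + 7;   /* wraps around to 4 */

	/* prints entries=7 despite producer having wrapped past consumer */
	printf("producer=%u consumer=%u entries=%u\n",
	       producer, consumer, producer - consumer);
	return 0;
}
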
@@ -89,9 +89,9 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level,
 
 int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
 {
-	char buf[4096];
-	struct pid_iter_bpf *skel;
 	struct pid_iter_entry *e;
+	char buf[4096 / sizeof(*e) * sizeof(*e)];
+	struct pid_iter_bpf *skel;
 	int err, ret, fd = -1, i;
 	libbpf_print_fn_t default_print;
 

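The resized buffer above is rounded down to a whole number of entries, so a read() from the BPF iterator can never hand back a torn, partial record at the end of the buffer. A standalone illustration (hypothetical bufsize_demo.c; the entry layout is a stand-in, the real one comes from the generated skeleton):

#include <stdio.h>

struct pid_iter_entry_demo { /* stand-in layout, 24 bytes here */
	int id;
	int pid;
	char comm[16];
};

int main(void)
{
	struct pid_iter_entry_demo *e;
	/* 4096 / 24 * 24 = 4080: the largest multiple of the entry size
	 * that still fits in one 4 KiB read */
	char buf[4096 / sizeof(*e) * sizeof(*e)];

	printf("entry size %zu, buffer %zu bytes (%zu whole entries)\n",
	       sizeof(*e), sizeof(buf), sizeof(buf) / sizeof(*e));
	return 0;
}
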
@@ -278,7 +278,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 		err = ringbuf_process_ring(ring);
 		if (err < 0)
 			return err;
-		res += cnt;
+		res += err;
 	}
 	return cnt < 0 ? -errno : res;
 }

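With the fix above, ring_buffer__poll() accumulates the per-ring record counts, so its return value is the number of records consumed rather than the number of epoll events. A hedged consumer sketch against the public libbpf API (error handling trimmed; map_fd is assumed to refer to an already-loaded BPF_MAP_TYPE_RINGBUF map):

#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_sample(void *ctx, void *data, size_t size)
{
	printf("got %zu-byte sample\n", size);
	return 0;
}

int consume_loop(int map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
	if (!rb)
		return -1;

	/* loop until an error; n is the number of records just consumed */
	while ((n = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
		fprintf(stderr, "consumed %d records\n", n);

	ring_buffer__free(rb);
	return n;
}
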
@@ -456,10 +456,10 @@ static struct bpf_align_test tests[] = {
 			 */
 			{7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
 			/* Checked s>=0 */
-			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 			/* packet pointer + nonnegative (4n+2) */
-			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
-			{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+			{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
 			 * We checked the bounds, but it might have been able
 			 * to overflow if the packet pointer started in the

@@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {
 			 * So we did not get a 'range' on R6, and the access
 			 * attempt will fail.
 			 */
-			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 		}
 	},
 	{

@@ -217,9 +217,15 @@ void test_ringbuf(void)
 	if (CHECK(err, "join_bg", "err %d\n", err))
 		goto cleanup;
 
-	if (CHECK(bg_ret != 1, "bg_ret", "epoll_wait result: %ld", bg_ret))
+	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
 		goto cleanup;
 
+	/* due to timing variations, there could still be non-notified
+	 * samples, so consume them here to collect all the samples
+	 */
+	err = ring_buffer__consume(ringbuf);
+	CHECK(err < 0, "rb_consume", "failed: %d\b", err);
+
 	/* 3 rounds, 2 samples each */
 	cnt = atomic_xchg(&sample_cnt, 0);
 	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

@@ -81,7 +81,7 @@ void test_ringbuf_multi(void)
 
 	/* poll for samples, should get 2 ringbufs back */
 	err = ring_buffer__poll(ringbuf, -1);
-	if (CHECK(err != 4, "poll_res", "expected 4 records, got %d\n", err))
+	if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
 		goto cleanup;
 
 	/* expect extra polling to return nothing */

@@ -184,9 +184,7 @@ def bpftool_prog_list(expected=None, ns=""):
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
     # Remove the base maps
-    for m in base_maps:
-        if m in maps:
-            maps.remove(m)
+    maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names]
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %

@@ -716,13 +714,11 @@ def test_multi_prog(simdev, sim, obj, modename, modeid):
     fail(ret == 0, "Replaced one of programs without -force")
     check_extack(err, "XDP program already attached.", args)
 
-    if modename == "" or modename == "drv":
-        othermode = "" if modename == "drv" else "drv"
-        start_test("Test multi-attachment XDP - detach...")
-        ret, _, err = sim.unset_xdp(othermode, force=True,
-                                    fail=False, include_stderr=True)
-        fail(ret == 0, "Removed program with a bad mode")
-        check_extack(err, "program loaded with different flags.", args)
+    start_test("Test multi-attachment XDP - remove without mode...")
+    ret, _, err = sim.unset_xdp("", force=True,
+                                fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program without a mode flag")
+    check_extack(err, "More than one program loaded, unset mode is ambiguous.", args)
 
     sim.unset_xdp("offload")
     xdp = sim.ip_link_show(xdp=True)["xdp"]

@@ -772,6 +768,9 @@ ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
 base_progs = progs
 _, base_maps = bpftool("map")
+base_map_names = [
+    'pid_iter.rodata' # created on each bpftool invocation
+]
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)

@@ -913,11 +912,18 @@ try:
 
     sim.tc_flush_filters()
 
+    start_test("Test TC offloads failure...")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 0
+    ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "TC filter did not reject with TC offloads enabled")
+    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 1
+
     start_test("Test TC offloads work...")
     ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
                                          fail=False, include_stderr=True)
     fail(ret != 0, "TC filter did not load with TC offloads enabled")
-    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
 
     start_test("Test TC offload basics...")
     dfs = simdev.dfs_get_bound_progs(expected=1)

@@ -941,6 +947,7 @@ try:
     start_test("Test disabling TC offloads is rejected while filters installed...")
     ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
     fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+    sim.set_ethtool_tc_offloads(True)
 
     start_test("Test qdisc removal frees things...")
     sim.tc_flush_filters()

@@ -999,18 +1006,8 @@ try:
                               fail=False, include_stderr=True)
     fail(ret == 0, "Replaced XDP program with a program in different mode")
     check_extack(err,
-                 "native and generic XDP can't be active at the same time.",
+                 "Native and generic XDP can't be active at the same time.",
                  args)
-    ret, _, err = sim.set_xdp(obj, "", force=True,
-                              fail=False, include_stderr=True)
-    fail(ret == 0, "Replaced XDP program with a program in different mode")
-    check_extack(err, "program loaded with different flags.", args)
-
-    start_test("Test XDP prog remove with bad flags...")
-    ret, _, err = sim.unset_xdp("", force=True,
-                                fail=False, include_stderr=True)
-    fail(ret == 0, "Removed program with a bad mode")
-    check_extack(err, "program loaded with different flags.", args)
 
     start_test("Test MTU restrictions...")
     ret, _ = sim.set_mtu(9000, fail=False)

|
||||
offload = bpf_pinned("/sys/fs/bpf/offload")
|
||||
ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
|
||||
fail(ret == 0, "attached offloaded XDP program to drv")
|
||||
check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
|
||||
check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args)
|
||||
rm("/sys/fs/bpf/offload")
|
||||
sim.wait_for_flush()
|
||||
|
||||
start_test("Test XDP load failure...")
|
||||
sim.dfs["dev/bpf_bind_verifier_accept"] = 0
|
||||
ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
|
||||
dev=sim['ifname'], fail=False, include_stderr=True)
|
||||
fail(ret == 0, "verifier should fail on load")
|
||||
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
|
||||
sim.dfs["dev/bpf_bind_verifier_accept"] = 1
|
||||
sim.wait_for_flush()
|
||||
|
||||
start_test("Test XDP offload...")
|
||||
_, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
|
||||
ipl = sim.ip_link_show(xdp=True)
|
||||
@ -1051,7 +1057,6 @@ try:
|
||||
progs = bpftool_prog_list(expected=1)
|
||||
prog = progs[0]
|
||||
fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
|
||||
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
|
||||
|
||||
start_test("Test XDP offload is device bound...")
|
||||
dfs = simdev.dfs_get_bound_progs(expected=1)
|
||||
|
@@ -68,7 +68,7 @@
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+	BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
 	BPF_MOV32_IMM(BPF_REG_1, 0),
 	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
 	BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),

@@ -703,3 +703,44 @@
 	.fixup_map_hash_8b = { 3 },
 	.result = ACCEPT,
 },
+{
+	"bounds checks after 32-bit truncation. test 1",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+	/* This used to reduce the max bound to 0x7fffffff */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 3 },
+	.errstr_unpriv = "R0 leaks addr",
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+},
+{
+	"bounds checks after 32-bit truncation. test 2",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+	BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1),
+	BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 3 },
+	.errstr_unpriv = "R0 leaks addr",
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+},