bpf, devmap: Exclude XDP broadcast to master device
If the ingress device is a bond slave, do not broadcast back through it or the bond master.

Signed-off-by: Jussi Maki <joamaki@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20210731055738.16820-5-joamaki@gmail.com
This commit is contained in:
parent 9e2ee5c7e7
commit aeea1b86f9
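For context, the code below changes the devmap broadcast path that a BPF program opts into with the BPF_F_BROADCAST flag, typically combined with BPF_F_EXCLUDE_INGRESS. The following sketch is not part of this commit (the map name and sizing are illustrative); it shows the kind of XDP program whose behavior changes: after this patch, a frame received on a bond slave is no longer broadcast back to the slave's upper devices, such as its bond master.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch, not from this commit: an XDP program that
 * broadcasts each frame to every entry of a devmap, excluding the
 * ingress device. With this patch, upper devices of the ingress
 * device (e.g. a bond master above a slave) are excluded as well.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__uint(max_entries, 32);
} forward_map SEC(".maps");	/* map name is illustrative */

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	/* With BPF_F_BROADCAST the key (0) is ignored; the frame is
	 * cloned to all map entries except those filtered out by
	 * BPF_F_EXCLUDE_INGRESS.
	 */
	return bpf_redirect_map(&forward_map, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";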
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c

@@ -534,10 +534,9 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
 }
 
-static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
-			 int exclude_ifindex)
+static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp)
 {
-	if (!obj || obj->dev->ifindex == exclude_ifindex ||
+	if (!obj ||
 	    !obj->dev->netdev_ops->ndo_xdp_xmit)
 		return false;
 
@@ -562,17 +561,48 @@ static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
 	return 0;
 }
 
+static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
+{
+	while (num_excluded--) {
+		if (ifindex == excluded[num_excluded])
+			return true;
+	}
+	return false;
+}
+
+/* Get ifindex of each upper device. 'indexes' must be able to hold at
+ * least MAX_NEST_DEV elements.
+ * Returns the number of ifindexes added.
+ */
+static int get_upper_ifindexes(struct net_device *dev, int *indexes)
+{
+	struct net_device *upper;
+	struct list_head *iter;
+	int n = 0;
+
+	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
+		indexes[n++] = upper->ifindex;
+	}
+	return n;
+}
+
 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 			  struct bpf_map *map, bool exclude_ingress)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
+	int excluded_devices[1+MAX_NEST_DEV];
 	struct hlist_head *head;
 	struct xdp_frame *xdpf;
+	int num_excluded = 0;
 	unsigned int i;
 	int err;
 
+	if (exclude_ingress) {
+		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
+		excluded_devices[num_excluded++] = dev_rx->ifindex;
+	}
+
 	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
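The two new helpers replace the old single exclude_ifindex check: get_upper_ifindexes() collects the ifindex of every upper device of the ingress device into a small on-stack array, and is_ifindex_excluded() linearly scans that array once per destination, which is cheap enough for the XDP fast path given the bounded nesting depth. A minimal userspace sketch of the same logic (illustrative only; the ifindex values are made up, and MAX_NEST_DEV here is assumed to match the kernel's nesting limit of 8):

#include <stdio.h>

#define MAX_NEST_DEV 8	/* assumed to match the kernel's nesting limit */

/* Mirrors is_ifindex_excluded(): linear scan of a small array. */
static int is_excluded(const int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--)
		if (ifindex == excluded[num_excluded])
			return 1;
	return 0;
}

int main(void)
{
	/* Suppose the frame arrived on slave ifindex 3 whose bond
	 * master is ifindex 10: both go on the exclusion list.
	 */
	int excluded[1 + MAX_NEST_DEV] = { 10, 3 };
	int num_excluded = 2;
	int devmap[] = { 2, 3, 5, 10 };

	for (unsigned int i = 0; i < sizeof(devmap) / sizeof(devmap[0]); i++)
		if (!is_excluded(excluded, num_excluded, devmap[i]))
			printf("broadcast to ifindex %d\n", devmap[i]);
	/* Output: ifindex 2 and 5 only; the ingress slave (3) and its
	 * bond master (10) are skipped.
	 */
	return 0;
}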
@@ -581,7 +611,10 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 	for (i = 0; i < map->max_entries; i++) {
 		dst = rcu_dereference_check(dtab->netdev_map[i],
 					    rcu_read_lock_bh_held());
-		if (!is_valid_dst(dst, xdp, exclude_ifindex))
+		if (!is_valid_dst(dst, xdp))
+			continue;
+
+		if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 			continue;
 
 		/* we only need n-1 clones; last_dst enqueued below */
@@ -601,7 +634,11 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 			head = dev_map_index_hash(dtab, i);
 			hlist_for_each_entry_rcu(dst, head, index_hlist,
 						 lockdep_is_held(&dtab->index_lock)) {
-				if (!is_valid_dst(dst, xdp, exclude_ifindex))
+				if (!is_valid_dst(dst, xdp))
+					continue;
+
+				if (is_ifindex_excluded(excluded_devices, num_excluded,
+							dst->dev->ifindex))
 					continue;
 
 				/* we only need n-1 clones; last_dst enqueued below */
@@ -675,18 +712,27 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 			   bool exclude_ingress)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
+	int excluded_devices[1+MAX_NEST_DEV];
 	struct hlist_head *head;
 	struct hlist_node *next;
+	int num_excluded = 0;
 	unsigned int i;
 	int err;
 
+	if (exclude_ingress) {
+		num_excluded = get_upper_ifindexes(dev, excluded_devices);
+		excluded_devices[num_excluded++] = dev->ifindex;
+	}
+
 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 		for (i = 0; i < map->max_entries; i++) {
 			dst = rcu_dereference_check(dtab->netdev_map[i],
 						    rcu_read_lock_bh_held());
-			if (!dst || dst->dev->ifindex == exclude_ifindex)
+			if (!dst)
+				continue;
+
+			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 				continue;
 
 			/* we only need n-1 clones; last_dst enqueued below */
@@ -700,12 +746,17 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 				return err;
 
 			last_dst = dst;
+
 		}
 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 		for (i = 0; i < dtab->n_buckets; i++) {
 			head = dev_map_index_hash(dtab, i);
 			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
-				if (!dst || dst->dev->ifindex == exclude_ifindex)
+				if (!dst)
+					continue;
+
+				if (is_ifindex_excluded(excluded_devices, num_excluded,
+							dst->dev->ifindex))
 					continue;
 
 				/* we only need n-1 clones; last_dst enqueued below */