// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/ip_tunnels.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/ipv6_stubs.h>
#include <net/rtnh.h>
#include "internal.h"

/* max memory we will use for mpls_route */
#define MAX_MPLS_ROUTE_MEM	4096

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

static int label_limit = (1 << 20) - 1;
static int ttl_max = 255;

#if IS_ENABLED(CONFIG_NET_IP_TUNNEL)
static size_t ipgre_mpls_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct mpls_shim_hdr);
}

static const struct ip_tunnel_encap_ops mpls_iptun_ops = {
	.encap_hlen	= ipgre_mpls_encap_hlen,
};

static int ipgre_tunnel_encap_add_mpls_ops(void)
{
	return ip_tunnel_encap_add_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}

static void ipgre_tunnel_encap_del_mpls_ops(void)
{
	ip_tunnel_encap_del_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}
#else
static int ipgre_tunnel_encap_add_mpls_ops(void)
{
	return 0;
}

static void ipgre_tunnel_encap_del_mpls_ops(void)
{
}
#endif

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);

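/* Look up the route programmed for an incoming label. Runs under RCU;
 * returns NULL if the label is outside the platform label table or has
 * no route installed.
 */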
static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}

bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);

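/* The via address of a nexthop is stored rt_via_offset bytes past the
 * mpls_nh itself; nexthops and their via addresses are laid out back to
 * back after the mpls_route.
 */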
static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	return (u8 *)nh + rt->rt_via_offset;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}

static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);

bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb)
{
	struct mpls_dev *mdev;

	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
		mdev = mpls_dev_get(dev);
		if (mdev)
			MPLS_INC_STATS_LEN(mdev, skb->len,
					   tx_packets,
					   tx_bytes);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct inet6_dev *in6dev = __in6_dev_get(dev);

		if (in6dev)
			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
					 IPSTATS_MIB_OUT, skb->len);
#endif
	}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);

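/* Hash the label stack (and, once the bottom of stack is reached, the
 * IPv4/IPv6 addresses and protocol of the payload) to pick a path of a
 * multipath route.
 */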
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
	struct mpls_entry_decoded dec;
	unsigned int mpls_hdr_len = 0;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
	     label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* found bottom label; does skb have room for a header? */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(hdr + 1);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, mpls_hdr_len +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(hdr + 1);
				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}

		break;
	}

	return hash;
}

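/* Nexthops are laid out back to back after the mpls_route and all have
 * the same size, so the nth hop lives at rt_nh + n * rt_nh_size.
 */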
static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
{
	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}

/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
 * a next hop (nh->nh_flags) are modified by netdev event handlers.
 * Since those fields can change at any moment, use READ_ONCE to
 * access both.
 */
static const struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
						   struct sk_buff *skb)
{
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;
	u8 alive;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		return rt->rt_nh;

	alive = READ_ONCE(rt->rt_nhn_alive);
	if (alive == 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		unsigned int nh_flags = READ_ONCE(nh->nh_flags);

		if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index)
			return nh;
		n++;
	} endfor_nexthops(rt);

out:
	return mpls_get_nexthop(rt, nh_index);
}

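/* Deliver a packet whose last label has been popped back to the IPv4 or
 * IPv6 stack, updating the TTL/hop limit according to the route's TTL
 * propagation policy. Returns false if the payload type cannot be
 * determined or the packet is too short.
 */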
static bool mpls_egress(struct net *net, struct mpls_route *rt,
			struct sk_buff *skb, struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);
		u8 new_ttl;
		skb->protocol = htons(ETH_P_IP);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * TTL, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			new_ttl = dec.ttl;
		else
			new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;

		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(new_ttl << 8));
		hdr4->ttl = new_ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * hop limit, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			hdr6->hop_limit = dec.ttl;
		else if (hdr6->hop_limit)
			hdr6->hop_limit = hdr6->hop_limit - 1;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		/* Should have decided which protocol it is by now */
		break;
	}

	return success;
}

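/* Receive handler for ETH_P_MPLS_UC packets: look up the incoming label,
 * pick a nexthop, pop the label and either hand the packet to the IP
 * stack (bottom of stack with no new labels to push) or push the
 * nexthop's labels and transmit it.
 */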
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	const struct mpls_nh *nh;
	struct mpls_route *rt;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *out_mdev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful this entire function runs inside of an rcu critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto drop;

	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
			   rx_bytes);

	if (!mdev->input_enabled) {
		MPLS_INC_STATS(mdev, rx_dropped);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto err;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto err;

	skb_dst_drop(skb);

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt) {
		MPLS_INC_STATS(mdev, rx_noroute);
		goto drop;
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh)
		goto err;

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto err;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;

	/* Find the output device */
	out_dev = nh->nh_dev;
	if (!mpls_output_possible(out_dev))
		goto tx_err;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto tx_err;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto tx_err;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	dec.ttl -= 1;
	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
			goto err;
	} else {
		bool bos;
		int i;
		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);
		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	mpls_stats_inc_outucastpkts(out_dev, skb);

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

tx_err:
	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
	if (out_mdev)
		MPLS_INC_STATS(out_mdev, tx_errors);
	goto drop;
err:
	MPLS_INC_STATS(mdev, rx_errors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};

static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_TTL_PROPAGATE]	= { .type = NLA_U8 },
};

struct mpls_route_config {
	u32			rc_protocol;
	u32			rc_ifindex;
	u8			rc_via_table;
	u8			rc_via_alen;
	u8			rc_via[MAX_VIA_ALEN];
	u32			rc_label;
	u8			rc_ttl_propagate;
	u8			rc_output_labels;
	u32			rc_output_label[MAX_NEW_LABELS];
	u32			rc_nlflags;
	enum mpls_payload_type	rc_payload_type;
	struct nl_info		rc_nlinfo;
	struct rtnexthop	*rc_mp;
	int			rc_mp_len;
};

/* all nexthops within a route have the same size based on max
 * number of labels and max via length for a hop
 */
static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
{
	u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
	struct mpls_route *rt;
	size_t size;

	size = sizeof(*rt) + num_nh * nh_size;
	if (size > MAX_MPLS_ROUTE_MEM)
		return ERR_PTR(-EINVAL);

	rt = kzalloc(size, GFP_KERNEL);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	rt->rt_nhn = num_nh;
	rt->rt_nhn_alive = num_nh;
	rt->rt_nh_size = nh_size;
	rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);

	return rt;
}

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}

static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}

static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}

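/* Return the first label above the reserved range that has no route
 * installed, or LABEL_NOT_SPECIFIED if the platform label table is full.
 */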
static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}

#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0, RT_SCOPE_UNIVERSE);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return ERR_CAST(dst);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

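/* Resolve the output device for a nexthop: use the supplied oif if one
 * was given, otherwise look it up from the via address through the IPv4
 * or IPv6 FIB.
 */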
static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);

	return dev;
}

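/* Resolve and validate the output device for a nexthop and record its
 * initial RTNH_F_DEAD/RTNH_F_LINKDOWN state.
 */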
static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	nh->nh_dev = dev;

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}

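/* Parse an RTA_VIA attribute into a neighbour table id and via address,
 * validating the address family and length.
 */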
static int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
		       u8 via_addr[], struct netlink_ext_ack *extack)
{
	struct rtvia *via = nla_data(nla);
	int err = -EINVAL;
	int alen;

	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
		NL_SET_ERR_MSG_ATTR(extack, nla,
				    "Invalid attribute length for RTA_VIA");
		goto errout;
	}
	alen = nla_len(nla) -
			offsetof(struct rtvia, rtvia_addr);
	if (alen > MAX_VIA_ALEN) {
		NL_SET_ERR_MSG_ATTR(extack, nla,
				    "Invalid address length for RTA_VIA");
		goto errout;
	}

	/* Validate the address family */
	switch (via->rtvia_family) {
	case AF_PACKET:
		*via_table = NEIGH_LINK_TABLE;
		break;
	case AF_INET:
		*via_table = NEIGH_ARP_TABLE;
		if (alen != 4)
			goto errout;
		break;
	case AF_INET6:
		*via_table = NEIGH_ND_TABLE;
		if (alen != 16)
			goto errout;
		break;
	default:
		/* Unsupported address family */
		goto errout;
	}

	memcpy(via_addr, via->rtvia_addr, alen);
	*via_alen = alen;
	err = 0;

errout:
	return err;
}

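/* Fill in the single nexthop of a route directly from an
 * mpls_route_config (the non-multipath case).
 */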
static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}

static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst, u8 max_labels,
			 struct netlink_ext_ack *extack)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, max_labels, &nh->nh_labels,
				     nh->nh_label, extack);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh), extack);
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}

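/* Walk an RTA_MULTIPATH attribute to count the nexthops and determine the
 * maximum via address length and label count needed when sizing the route.
 * Returns 0 on an invalid or oversized nexthop configuration.
 */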
static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			      u8 cfg_via_alen, u8 *max_via_alen,
			      u8 *max_labels)
{
	int remaining = len;
	u8 nhs = 0;

	*max_via_alen = 0;
	*max_labels = 0;

	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;
		u8 n_labels = 0;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nla = nla_find(attrs, attrlen, RTA_NEWDST);
		if (nla &&
		    nla_get_labels(nla, MAX_NEW_LABELS, &n_labels,
				   NULL, NULL) != 0)
			return 0;

		*max_labels = max_t(u8, *max_labels, n_labels);

		/* number of nexthops is tracked by a u8.
		 * Check for overflow.
		 */
		if (nhs == 255)
			return 0;
		nhs++;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int mpls_nh_build_multi(struct mpls_route_config *cfg,
			       struct mpls_route *rt, u8 max_labels,
			       struct netlink_ext_ack *extack)
{
	struct rtnexthop *rtnh = cfg->rc_mp;
	struct nlattr *nla_via, *nla_newdst;
	int remaining = cfg->rc_mp_len;
	int err = 0;
	u8 nhs = 0;

	change_nexthops(rt) {
		int attrlen;

		nla_via = NULL;
		nla_newdst = NULL;

		err = -EINVAL;
		if (!rtnh_ok(rtnh, remaining))
			goto errout;

		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);

			nla_via = nla_find(attrs, attrlen, RTA_VIA);
			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
		}

		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
				    rtnh->rtnh_ifindex, nla_via, nla_newdst,
				    max_labels, extack);
		if (err)
			goto errout;

		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			rt->rt_nhn_alive--;

		rtnh = rtnh_next(rtnh, &remaining);
		nhs++;
	} endfor_nexthops(rt);

	rt->rt_nhn = nhs;

	return 0;

errout:
	return err;
}

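/* Validate a label index: reject reserved labels and labels beyond
 * platform_labels, and clamp the index against speculative out-of-bounds
 * access.
 */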
static bool mpls_label_ok(struct net *net, unsigned int *index,
			  struct netlink_ext_ack *extack)
{
	bool is_ok = true;

	/* Reserved labels may not be set */
	if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
		NL_SET_ERR_MSG(extack,
			       "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
		is_ok = false;
	}

	/* The full 20 bit range may not be supported. */
	if (is_ok && *index >= net->mpls.platform_labels) {
		NL_SET_ERR_MSG(extack,
			       "Label >= configured maximum in platform_labels");
		is_ok = false;
	}

	*index = array_index_nospec(*index, net->mpls.platform_labels);
	return is_ok;
}

static int mpls_route_add(struct mpls_route_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_route *rt, *old;
	int err = -EINVAL;
	u8 max_via_alen;
	unsigned index;
	u8 max_labels;
	u8 nhs;

	index = cfg->rc_label;

	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
	    (cfg->rc_nlflags & NLM_F_CREATE)) {
		index = find_free_label(net);
	}

	if (!mpls_label_ok(net, &index, extack))
		goto errout;

	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
	if (cfg->rc_nlflags & NLM_F_APPEND) {
		NL_SET_ERR_MSG(extack, "MPLS does not support route append");
		goto errout;
	}

	err = -EEXIST;
	platform_label = rtnl_dereference(net->mpls.platform_label);
	old = rtnl_dereference(platform_label[index]);
	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
		goto errout;

	err = -EEXIST;
	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
		goto errout;

	err = -ENOENT;
	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
		goto errout;

	err = -EINVAL;
	if (cfg->rc_mp) {
		nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
					  cfg->rc_via_alen, &max_via_alen,
					  &max_labels);
	} else {
		max_via_alen = cfg->rc_via_alen;
		max_labels = cfg->rc_output_labels;
		nhs = 1;
	}

	if (nhs == 0) {
		NL_SET_ERR_MSG(extack, "Route does not contain a nexthop");
		goto errout;
	}

|
2015-10-23 13:03:27 +00:00
|
|
|
|
2017-03-31 14:14:03 +00:00
|
|
|
rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
|
2017-03-31 14:14:02 +00:00
|
|
|
if (IS_ERR(rt)) {
|
|
|
|
err = PTR_ERR(rt);
|
2015-03-04 01:12:40 +00:00
|
|
|
goto errout;
|
2017-03-31 14:14:02 +00:00
|
|
|
}
|
2015-03-04 01:12:40 +00:00
|
|
|
|
|
|
|
rt->rt_protocol = cfg->rc_protocol;
|
2015-08-06 10:04:56 +00:00
|
|
|
rt->rt_payload_type = cfg->rc_payload_type;
|
2017-03-10 20:43:24 +00:00
|
|
|
rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
|
2015-03-04 01:12:40 +00:00
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
if (cfg->rc_mp)
|
2017-05-27 22:19:31 +00:00
|
|
|
err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
|
2015-10-23 13:03:27 +00:00
|
|
|
else
|
|
|
|
err = mpls_nh_build_from_cfg(cfg, rt);
|
|
|
|
if (err)
|
|
|
|
goto freert;
|
|
|
|
|
|
|
|
mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
|
2015-03-04 01:12:40 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
freert:
|
|
|
|
mpls_rt_free(rt);
|
2015-03-04 01:12:40 +00:00
|
|
|
errout:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
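/* Remove the route for cfg->rc_label: passing a NULL route to
 * mpls_route_update() clears the platform label table entry.
 */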
static int mpls_route_del(struct mpls_route_config *cfg,
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-03-04 01:12:40 +00:00
|
|
|
{
|
|
|
|
struct net *net = cfg->rc_nlinfo.nl_net;
|
|
|
|
unsigned index;
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
index = cfg->rc_label;
|
|
|
|
|
2018-02-08 06:34:24 +00:00
|
|
|
if (!mpls_label_ok(net, &index, extack))
|
2015-03-04 01:12:40 +00:00
|
|
|
goto errout;
|
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
|
2015-03-04 01:12:40 +00:00
|
|
|
|
|
|
|
err = 0;
|
|
|
|
errout:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-01-16 14:16:37 +00:00
|
|
|
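/* Sum the per-CPU counters into @stats, using the u64_stats seqcount
 * to read a consistent snapshot from each CPU.
 */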
static void mpls_get_stats(struct mpls_dev *mdev,
|
|
|
|
struct mpls_link_stats *stats)
|
|
|
|
{
|
|
|
|
struct mpls_pcpu_stats *p;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
memset(stats, 0, sizeof(*stats));
|
|
|
|
|
|
|
|
for_each_possible_cpu(i) {
|
|
|
|
struct mpls_link_stats local;
|
|
|
|
unsigned int start;
|
|
|
|
|
|
|
|
p = per_cpu_ptr(mdev->stats, i);
|
|
|
|
do {
|
2022-10-26 13:22:15 +00:00
|
|
|
start = u64_stats_fetch_begin(&p->syncp);
|
2017-01-16 14:16:37 +00:00
|
|
|
local = p->stats;
|
2022-10-26 13:22:15 +00:00
|
|
|
} while (u64_stats_fetch_retry(&p->syncp, start));
|
2017-01-16 14:16:37 +00:00
|
|
|
|
|
|
|
stats->rx_packets += local.rx_packets;
|
|
|
|
stats->rx_bytes += local.rx_bytes;
|
|
|
|
stats->tx_packets += local.tx_packets;
|
|
|
|
stats->tx_bytes += local.tx_bytes;
|
|
|
|
stats->rx_errors += local.rx_errors;
|
|
|
|
stats->tx_errors += local.tx_errors;
|
|
|
|
stats->rx_dropped += local.rx_dropped;
|
|
|
|
stats->tx_dropped += local.tx_dropped;
|
|
|
|
stats->rx_noroute += local.rx_noroute;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mpls_fill_stats_af(struct sk_buff *skb,
|
|
|
|
const struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct mpls_link_stats *stats;
|
|
|
|
struct mpls_dev *mdev;
|
|
|
|
struct nlattr *nla;
|
|
|
|
|
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (!mdev)
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
|
|
|
|
sizeof(struct mpls_link_stats),
|
|
|
|
MPLS_STATS_UNSPEC);
|
|
|
|
if (!nla)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
stats = nla_data(nla);
|
|
|
|
mpls_get_stats(mdev, stats);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t mpls_get_stats_af_size(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct mpls_dev *mdev;
|
|
|
|
|
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (!mdev)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return nla_total_size_64bit(sizeof(struct mpls_link_stats));
|
|
|
|
}
|
|
|
|
|
2017-02-20 16:03:30 +00:00
|
|
|
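/* Fill one RTM_NEWNETCONF message for @mdev; @type selects a single
 * NETCONFA_* attribute or NETCONFA_ALL for all of them.
 */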
static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
|
|
|
|
u32 portid, u32 seq, int event,
|
|
|
|
unsigned int flags, int type)
|
|
|
|
{
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct netconfmsg *ncm;
|
|
|
|
bool all = false;
|
|
|
|
|
|
|
|
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
|
|
|
|
flags);
|
|
|
|
if (!nlh)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (type == NETCONFA_ALL)
|
|
|
|
all = true;
|
|
|
|
|
|
|
|
ncm = nlmsg_data(nlh);
|
|
|
|
ncm->ncm_family = AF_MPLS;
|
|
|
|
|
|
|
|
if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if ((all || type == NETCONFA_INPUT) &&
|
|
|
|
nla_put_s32(skb, NETCONFA_INPUT,
|
2024-04-10 11:19:50 +00:00
|
|
|
READ_ONCE(mdev->input_enabled)) < 0)
|
2017-02-20 16:03:30 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
nlmsg_cancel(skb, nlh);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mpls_netconf_msgsize_devconf(int type)
|
|
|
|
{
|
|
|
|
int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
|
|
|
|
+ nla_total_size(4); /* NETCONFA_IFINDEX */
|
|
|
|
bool all = false;
|
|
|
|
|
|
|
|
if (type == NETCONFA_ALL)
|
|
|
|
all = true;
|
|
|
|
|
|
|
|
if (all || type == NETCONFA_INPUT)
|
|
|
|
size += nla_total_size(4);
|
|
|
|
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2017-03-28 21:28:06 +00:00
|
|
|
static void mpls_netconf_notify_devconf(struct net *net, int event,
|
|
|
|
int type, struct mpls_dev *mdev)
|
2017-02-20 16:03:30 +00:00
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int err = -ENOBUFS;
|
|
|
|
|
|
|
|
skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
|
|
|
|
if (!skb)
|
|
|
|
goto errout;
|
|
|
|
|
2017-03-28 21:28:06 +00:00
|
|
|
err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
|
2017-02-20 16:03:30 +00:00
|
|
|
if (err < 0) {
|
|
|
|
/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
|
|
|
|
WARN_ON(err == -EMSGSIZE);
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
|
|
|
|
return;
|
|
|
|
errout:
|
2024-08-22 04:32:52 +00:00
|
|
|
rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
|
2017-02-20 16:03:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
|
|
|
|
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
|
|
|
|
};
|
|
|
|
|
2019-01-18 18:46:26 +00:00
|
|
|
static int mpls_netconf_valid_get_req(struct sk_buff *skb,
|
|
|
|
const struct nlmsghdr *nlh,
|
|
|
|
struct nlattr **tb,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"Invalid header for netconf get request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!netlink_strict_get_check(skb))
|
2019-04-26 12:07:28 +00:00
|
|
|
return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
|
|
|
|
tb, NETCONFA_MAX,
|
|
|
|
devconf_mpls_policy, extack);
|
2019-01-18 18:46:26 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
|
|
|
|
tb, NETCONFA_MAX,
|
|
|
|
devconf_mpls_policy, extack);
|
2019-01-18 18:46:26 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
for (i = 0; i <= NETCONFA_MAX; i++) {
|
|
|
|
if (!tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
case NETCONFA_IFINDEX:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-20 16:03:30 +00:00
|
|
|
static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
|
2017-04-16 16:48:24 +00:00
|
|
|
struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2017-02-20 16:03:30 +00:00
|
|
|
{
|
|
|
|
struct net *net = sock_net(in_skb->sk);
|
|
|
|
struct nlattr *tb[NETCONFA_MAX + 1];
|
|
|
|
struct net_device *dev;
|
|
|
|
struct mpls_dev *mdev;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int ifindex;
|
|
|
|
int err;
|
|
|
|
|
2019-01-18 18:46:26 +00:00
|
|
|
err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
|
2017-02-20 16:03:30 +00:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (!tb[NETCONFA_IFINDEX])
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
|
|
|
|
dev = __dev_get_by_index(net, ifindex);
|
|
|
|
if (!dev)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (!mdev)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = -ENOBUFS;
|
|
|
|
skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
|
|
|
|
if (!skb)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = mpls_netconf_fill_devconf(skb, mdev,
|
|
|
|
NETLINK_CB(in_skb).portid,
|
|
|
|
nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
|
|
|
|
NETCONFA_ALL);
|
|
|
|
if (err < 0) {
|
|
|
|
/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
|
|
|
|
WARN_ON(err == -EMSGSIZE);
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
|
|
|
|
errout:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mpls_netconf_dump_devconf(struct sk_buff *skb,
|
|
|
|
struct netlink_callback *cb)
|
|
|
|
{
|
2018-10-08 03:16:41 +00:00
|
|
|
const struct nlmsghdr *nlh = cb->nlh;
|
2017-02-20 16:03:30 +00:00
|
|
|
struct net *net = sock_net(skb->sk);
|
2024-04-10 11:19:50 +00:00
|
|
|
struct {
|
|
|
|
unsigned long ifindex;
|
|
|
|
} *ctx = (void *)cb->ctx;
|
2017-02-20 16:03:30 +00:00
|
|
|
struct net_device *dev;
|
|
|
|
struct mpls_dev *mdev;
|
2024-04-10 11:19:50 +00:00
|
|
|
int err = 0;
|
2017-02-20 16:03:30 +00:00
|
|
|
|
2018-10-08 03:16:41 +00:00
|
|
|
if (cb->strict_check) {
|
|
|
|
struct netlink_ext_ack *extack = cb->extack;
|
|
|
|
struct netconfmsg *ncm;
|
|
|
|
|
|
|
|
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-10 11:19:50 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
for_each_netdev_dump(net, dev, ctx->ifindex) {
|
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (!mdev)
|
|
|
|
continue;
|
|
|
|
err = mpls_netconf_fill_devconf(skb, mdev,
|
|
|
|
NETLINK_CB(cb->skb).portid,
|
|
|
|
nlh->nlmsg_seq,
|
|
|
|
RTM_NEWNETCONF,
|
|
|
|
NLM_F_MULTI,
|
|
|
|
NETCONFA_ALL);
|
|
|
|
if (err < 0)
|
|
|
|
break;
|
2017-02-20 16:03:30 +00:00
|
|
|
}
|
2024-04-10 11:19:50 +00:00
|
|
|
rcu_read_unlock();
|
2017-02-20 16:03:30 +00:00
|
|
|
|
2024-04-10 11:19:50 +00:00
|
|
|
return err;
|
2017-02-20 16:03:30 +00:00
|
|
|
}
|
|
|
|
|
2015-04-22 10:14:38 +00:00
|
|
|
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
|
|
|
|
(&((struct mpls_dev *)0)->field)
|
|
|
|
|
2024-07-24 18:59:29 +00:00
|
|
|
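/* proc handler for the per-device MPLS sysctls: when a write changes
 * input_enabled, emit an RTM_NEWNETCONF notification.
 */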
static int mpls_conf_proc(const struct ctl_table *ctl, int write,
|
2020-04-24 06:43:38 +00:00
|
|
|
void *buffer, size_t *lenp, loff_t *ppos)
|
2017-02-20 16:03:30 +00:00
|
|
|
{
|
|
|
|
int oval = *(int *)ctl->data;
|
|
|
|
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
|
|
|
|
|
|
|
|
if (write) {
|
|
|
|
struct mpls_dev *mdev = ctl->extra1;
|
|
|
|
int i = (int *)ctl->data - (int *)mdev;
|
|
|
|
struct net *net = ctl->extra2;
|
|
|
|
int val = *(int *)ctl->data;
|
|
|
|
|
|
|
|
if (i == offsetof(struct mpls_dev, input_enabled) &&
|
|
|
|
val != oval) {
|
2017-03-28 21:28:06 +00:00
|
|
|
mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
|
|
|
|
NETCONFA_INPUT, mdev);
|
2017-02-20 16:03:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-04-22 10:14:38 +00:00
|
|
|
static const struct ctl_table mpls_dev_table[] = {
|
|
|
|
{
|
|
|
|
.procname = "input",
|
|
|
|
.maxlen = sizeof(int),
|
|
|
|
.mode = 0644,
|
2017-02-20 16:03:30 +00:00
|
|
|
.proc_handler = mpls_conf_proc,
|
2015-04-22 10:14:38 +00:00
|
|
|
.data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
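/* Register the per-device net/mpls/conf/<devname> sysctl table.  The
 * template stores offsets into struct mpls_dev in .data; they are
 * turned into absolute pointers before registration.
 */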
static int mpls_dev_sysctl_register(struct net_device *dev,
|
|
|
|
struct mpls_dev *mdev)
|
|
|
|
{
|
|
|
|
char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
|
2024-05-01 09:29:25 +00:00
|
|
|
size_t table_size = ARRAY_SIZE(mpls_dev_table);
|
2017-02-20 16:03:30 +00:00
|
|
|
struct net *net = dev_net(dev);
|
2015-04-22 10:14:38 +00:00
|
|
|
struct ctl_table *table;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
|
|
|
|
if (!table)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Table data contains only offsets relative to the base of
|
|
|
|
* the mdev at this point, so make them absolute.
|
|
|
|
*/
|
2024-05-01 09:29:25 +00:00
|
|
|
for (i = 0; i < table_size; i++) {
|
2015-04-22 10:14:38 +00:00
|
|
|
table[i].data = (char *)mdev + (uintptr_t)table[i].data;
|
2017-02-20 16:03:30 +00:00
|
|
|
table[i].extra1 = mdev;
|
|
|
|
table[i].extra2 = net;
|
|
|
|
}
|
2015-04-22 10:14:38 +00:00
|
|
|
|
|
|
|
snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
|
|
|
|
|
2024-05-01 09:29:25 +00:00
|
|
|
mdev->sysctl = register_net_sysctl_sz(net, path, table, table_size);
|
2015-04-22 10:14:38 +00:00
|
|
|
if (!mdev->sysctl)
|
|
|
|
goto free;
|
|
|
|
|
2017-03-28 21:28:07 +00:00
|
|
|
mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
|
2015-04-22 10:14:38 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
free:
|
|
|
|
kfree(table);
|
|
|
|
out:
|
2023-02-14 06:53:55 +00:00
|
|
|
mdev->sysctl = NULL;
|
2015-04-22 10:14:38 +00:00
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
2017-03-28 21:28:07 +00:00
|
|
|
static void mpls_dev_sysctl_unregister(struct net_device *dev,
|
|
|
|
struct mpls_dev *mdev)
|
2015-04-22 10:14:38 +00:00
|
|
|
{
|
2017-03-28 21:28:07 +00:00
|
|
|
struct net *net = dev_net(dev);
|
2024-04-18 09:40:08 +00:00
|
|
|
const struct ctl_table *table;
|
2015-04-22 10:14:38 +00:00
|
|
|
|
2023-02-14 06:53:55 +00:00
|
|
|
if (!mdev->sysctl)
|
|
|
|
return;
|
|
|
|
|
2015-04-22 10:14:38 +00:00
|
|
|
table = mdev->sysctl->ctl_table_arg;
|
|
|
|
unregister_net_sysctl_table(mdev->sysctl);
|
|
|
|
kfree(table);
|
2017-03-28 21:28:07 +00:00
|
|
|
|
|
|
|
mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
|
2015-04-22 10:14:38 +00:00
|
|
|
}
|
|
|
|
|
2015-04-22 10:14:37 +00:00
|
|
|
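/* Allocate the per-device MPLS state (per-CPU stats and sysctls) and
 * publish it via dev->mpls_ptr.  Called under RTNL from the netdevice
 * notifier.
 */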
static struct mpls_dev *mpls_add_dev(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct mpls_dev *mdev;
|
|
|
|
int err = -ENOMEM;
|
2017-01-16 14:16:37 +00:00
|
|
|
int i;
|
2015-04-22 10:14:37 +00:00
|
|
|
|
|
|
|
ASSERT_RTNL();
|
|
|
|
|
|
|
|
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
|
|
|
|
if (!mdev)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
2017-01-16 14:16:37 +00:00
|
|
|
mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
|
|
|
|
if (!mdev->stats)
|
|
|
|
goto free;
|
|
|
|
|
|
|
|
for_each_possible_cpu(i) {
|
|
|
|
struct mpls_pcpu_stats *mpls_stats;
|
|
|
|
|
|
|
|
mpls_stats = per_cpu_ptr(mdev->stats, i);
|
|
|
|
u64_stats_init(&mpls_stats->syncp);
|
|
|
|
}
|
|
|
|
|
2017-03-28 21:28:07 +00:00
|
|
|
mdev->dev = dev;
|
|
|
|
|
2015-04-22 10:14:38 +00:00
|
|
|
err = mpls_dev_sysctl_register(dev, mdev);
|
|
|
|
if (err)
|
|
|
|
goto free;
|
|
|
|
|
2015-04-22 10:14:37 +00:00
|
|
|
rcu_assign_pointer(dev->mpls_ptr, mdev);
|
|
|
|
|
|
|
|
return mdev;
|
2015-04-22 10:14:38 +00:00
|
|
|
|
|
|
|
free:
|
2017-01-16 14:16:37 +00:00
|
|
|
free_percpu(mdev->stats);
|
2015-04-22 10:14:38 +00:00
|
|
|
kfree(mdev);
|
|
|
|
return ERR_PTR(err);
|
2015-04-22 10:14:37 +00:00
|
|
|
}
|
|
|
|
|
2017-01-16 14:16:37 +00:00
|
|
|
static void mpls_dev_destroy_rcu(struct rcu_head *head)
|
|
|
|
{
|
|
|
|
struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
|
|
|
|
|
|
|
|
free_percpu(mdev->stats);
|
|
|
|
kfree(mdev);
|
|
|
|
}
|
|
|
|
|
2021-11-29 06:15:05 +00:00
|
|
|
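/* Mark nexthops that use @dev dead and/or linkdown; on
 * NETDEV_UNREGISTER the nexthop's device reference is dropped and the
 * route is deleted entirely once all of its nexthops are gone.
 */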
static int mpls_ifdown(struct net_device *dev, int event)
|
2015-03-04 01:10:47 +00:00
|
|
|
{
|
2015-03-07 22:21:56 +00:00
|
|
|
struct mpls_route __rcu **platform_label;
|
2015-03-04 01:10:47 +00:00
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
unsigned index;
|
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
platform_label = rtnl_dereference(net->mpls.platform_label);
|
2015-03-04 01:10:47 +00:00
|
|
|
for (index = 0; index < net->mpls.platform_labels; index++) {
|
2015-03-07 22:21:56 +00:00
|
|
|
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
|
2021-11-29 06:15:05 +00:00
|
|
|
bool nh_del = false;
|
|
|
|
u8 alive = 0;
|
2015-12-02 06:18:11 +00:00
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
if (!rt)
|
|
|
|
continue;
|
2015-12-02 06:18:11 +00:00
|
|
|
|
2021-11-29 06:15:05 +00:00
|
|
|
if (event == NETDEV_UNREGISTER) {
|
|
|
|
u8 deleted = 0;
|
|
|
|
|
|
|
|
for_nexthops(rt) {
|
2021-11-29 06:15:06 +00:00
|
|
|
if (!nh->nh_dev || nh->nh_dev == dev)
|
2021-11-29 06:15:05 +00:00
|
|
|
deleted++;
|
2021-11-29 06:15:06 +00:00
|
|
|
if (nh->nh_dev == dev)
|
2021-11-29 06:15:05 +00:00
|
|
|
nh_del = true;
|
|
|
|
} endfor_nexthops(rt);
|
|
|
|
|
|
|
|
/* if there are no more nexthops, delete the route */
|
|
|
|
if (deleted == rt->rt_nhn) {
|
|
|
|
mpls_route_update(net, index, NULL, NULL);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nh_del) {
|
|
|
|
size_t size = sizeof(*rt) + rt->rt_nhn *
|
|
|
|
rt->rt_nh_size;
|
|
|
|
struct mpls_route *orig = rt;
|
|
|
|
|
2022-04-06 11:46:29 +00:00
|
|
|
rt = kmemdup(orig, size, GFP_KERNEL);
|
2021-11-29 06:15:05 +00:00
|
|
|
if (!rt)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
change_nexthops(rt) {
|
2017-03-31 14:13:59 +00:00
|
|
|
unsigned int nh_flags = nh->nh_flags;
|
|
|
|
|
2021-11-29 06:15:06 +00:00
|
|
|
if (nh->nh_dev != dev)
|
2017-03-13 23:49:10 +00:00
|
|
|
goto next;
|
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
switch (event) {
|
|
|
|
case NETDEV_DOWN:
|
|
|
|
case NETDEV_UNREGISTER:
|
2017-03-31 14:13:59 +00:00
|
|
|
nh_flags |= RTNH_F_DEAD;
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2015-12-02 06:18:11 +00:00
|
|
|
case NETDEV_CHANGE:
|
2017-03-31 14:13:59 +00:00
|
|
|
nh_flags |= RTNH_F_LINKDOWN;
|
2015-12-02 06:18:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (event == NETDEV_UNREGISTER)
|
2021-11-29 06:15:06 +00:00
|
|
|
nh->nh_dev = NULL;
|
2017-03-31 14:13:59 +00:00
|
|
|
|
|
|
|
if (nh->nh_flags != nh_flags)
|
|
|
|
WRITE_ONCE(nh->nh_flags, nh_flags);
|
2017-03-13 23:49:10 +00:00
|
|
|
next:
|
2017-03-31 14:13:59 +00:00
|
|
|
if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
|
2017-03-13 23:49:10 +00:00
|
|
|
alive++;
|
2015-10-23 13:03:27 +00:00
|
|
|
} endfor_nexthops(rt);
|
2017-03-13 23:49:10 +00:00
|
|
|
|
|
|
|
WRITE_ONCE(rt->rt_nhn_alive, alive);
|
2017-03-24 22:21:57 +00:00
|
|
|
|
2021-11-29 06:15:05 +00:00
|
|
|
if (nh_del)
|
|
|
|
mpls_route_update(net, index, rt, NULL);
|
2015-03-04 01:10:47 +00:00
|
|
|
}
|
2021-11-29 06:15:05 +00:00
|
|
|
|
|
|
|
return 0;
|
2015-12-02 06:18:11 +00:00
|
|
|
}
|
|
|
|
|
2017-03-31 14:13:59 +00:00
|
|
|
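/* Clear @flags (RTNH_F_DEAD and/or RTNH_F_LINKDOWN) on nexthops using
 * @dev and recompute the route's alive nexthop count.
 */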
static void mpls_ifup(struct net_device *dev, unsigned int flags)
|
2015-12-02 06:18:11 +00:00
|
|
|
{
|
|
|
|
struct mpls_route __rcu **platform_label;
|
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
unsigned index;
|
2017-03-31 14:14:00 +00:00
|
|
|
u8 alive;
|
2015-12-02 06:18:11 +00:00
|
|
|
|
|
|
|
platform_label = rtnl_dereference(net->mpls.platform_label);
|
|
|
|
for (index = 0; index < net->mpls.platform_labels; index++) {
|
|
|
|
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
|
|
|
|
|
|
|
|
if (!rt)
|
|
|
|
continue;
|
2015-04-22 10:14:38 +00:00
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
alive = 0;
|
|
|
|
change_nexthops(rt) {
|
2017-03-31 14:13:59 +00:00
|
|
|
unsigned int nh_flags = nh->nh_flags;
|
2015-04-22 10:14:37 +00:00
|
|
|
|
2017-03-31 14:13:59 +00:00
|
|
|
if (!(nh_flags & flags)) {
|
2015-12-02 06:18:11 +00:00
|
|
|
alive++;
|
|
|
|
continue;
|
|
|
|
}
|
2021-11-29 06:15:06 +00:00
|
|
|
if (nh->nh_dev != dev)
|
2015-12-02 06:18:11 +00:00
|
|
|
continue;
|
|
|
|
alive++;
|
2017-03-31 14:13:59 +00:00
|
|
|
nh_flags &= ~flags;
|
2017-05-31 06:50:36 +00:00
|
|
|
WRITE_ONCE(nh->nh_flags, nh_flags);
|
2015-12-02 06:18:11 +00:00
|
|
|
} endfor_nexthops(rt);
|
|
|
|
|
2017-03-31 14:13:59 +00:00
|
|
|
WRITE_ONCE(rt->rt_nhn_alive, alive);
|
2015-12-02 06:18:11 +00:00
|
|
|
}
|
2015-03-04 01:10:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
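/* netdevice notifier: set up MPLS state on NETDEV_REGISTER, adjust
 * nexthop flags on up/down/change events, re-register the sysctls on
 * NETDEV_CHANGENAME, and tear the state down on NETDEV_UNREGISTER.
 */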
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
|
|
|
|
void *ptr)
|
|
|
|
{
|
|
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
2015-04-22 10:14:37 +00:00
|
|
|
struct mpls_dev *mdev;
|
2015-12-02 06:18:11 +00:00
|
|
|
unsigned int flags;
|
2022-02-10 00:28:38 +00:00
|
|
|
int err;
|
2015-03-04 01:10:47 +00:00
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
if (event == NETDEV_REGISTER) {
|
2020-07-27 07:39:19 +00:00
|
|
|
mdev = mpls_add_dev(dev);
|
|
|
|
if (IS_ERR(mdev))
|
|
|
|
return notifier_from_errno(PTR_ERR(mdev));
|
2018-09-21 21:30:05 +00:00
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
2015-04-22 10:14:37 +00:00
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (!mdev)
|
|
|
|
return NOTIFY_OK;
|
|
|
|
|
|
|
|
switch (event) {
|
2021-11-29 06:15:05 +00:00
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
case NETDEV_DOWN:
|
2021-11-29 06:15:05 +00:00
|
|
|
err = mpls_ifdown(dev, event);
|
|
|
|
if (err)
|
|
|
|
return notifier_from_errno(err);
|
2015-12-02 06:18:11 +00:00
|
|
|
break;
|
|
|
|
case NETDEV_UP:
|
|
|
|
flags = dev_get_flags(dev);
|
|
|
|
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
|
|
|
|
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
|
|
|
|
else
|
|
|
|
mpls_ifup(dev, RTNH_F_DEAD);
|
|
|
|
break;
|
|
|
|
case NETDEV_CHANGE:
|
|
|
|
flags = dev_get_flags(dev);
|
2021-11-29 06:15:05 +00:00
|
|
|
if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
|
2015-12-02 06:18:11 +00:00
|
|
|
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
|
2021-11-29 06:15:05 +00:00
|
|
|
} else {
|
|
|
|
err = mpls_ifdown(dev, event);
|
|
|
|
if (err)
|
|
|
|
return notifier_from_errno(err);
|
|
|
|
}
|
2015-12-02 06:18:11 +00:00
|
|
|
break;
|
2015-03-04 01:10:47 +00:00
|
|
|
case NETDEV_UNREGISTER:
|
2021-11-29 06:15:05 +00:00
|
|
|
err = mpls_ifdown(dev, event);
|
|
|
|
if (err)
|
|
|
|
return notifier_from_errno(err);
|
2015-12-02 06:18:11 +00:00
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (mdev) {
|
2017-03-28 21:28:07 +00:00
|
|
|
mpls_dev_sysctl_unregister(dev, mdev);
|
2015-12-02 06:18:11 +00:00
|
|
|
RCU_INIT_POINTER(dev->mpls_ptr, NULL);
|
2017-01-16 14:16:37 +00:00
|
|
|
call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
|
2015-12-02 06:18:11 +00:00
|
|
|
}
|
2015-03-04 01:10:47 +00:00
|
|
|
break;
|
2015-06-11 18:58:26 +00:00
|
|
|
case NETDEV_CHANGENAME:
|
|
|
|
mdev = mpls_dev_get(dev);
|
|
|
|
if (mdev) {
|
2017-03-28 21:28:07 +00:00
|
|
|
mpls_dev_sysctl_unregister(dev, mdev);
|
2015-06-11 18:58:26 +00:00
|
|
|
err = mpls_dev_sysctl_register(dev, mdev);
|
|
|
|
if (err)
|
|
|
|
return notifier_from_errno(err);
|
|
|
|
}
|
|
|
|
break;
|
2015-03-04 01:10:47 +00:00
|
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block mpls_dev_notifier = {
|
|
|
|
.notifier_call = mpls_dev_notify,
|
|
|
|
};
|
|
|
|
|
2015-03-04 01:13:56 +00:00
|
|
|
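/* Emit an RTA_VIA attribute for the nexthop address, mapping the
 * neighbour table index to an address family.
 */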
static int nla_put_via(struct sk_buff *skb,
|
2015-03-07 22:25:56 +00:00
|
|
|
u8 table, const void *addr, int alen)
|
2015-03-04 01:13:56 +00:00
|
|
|
{
|
2015-03-07 22:25:56 +00:00
|
|
|
static const int table_to_family[NEIGH_NR_TABLES + 1] = {
|
2024-10-14 23:52:16 +00:00
|
|
|
AF_INET, AF_INET6, AF_PACKET,
|
2015-03-07 22:25:56 +00:00
|
|
|
};
|
2015-03-04 01:13:56 +00:00
|
|
|
struct nlattr *nla;
|
|
|
|
struct rtvia *via;
|
2015-03-07 22:25:56 +00:00
|
|
|
int family = AF_UNSPEC;
|
2015-03-04 01:13:56 +00:00
|
|
|
|
|
|
|
nla = nla_reserve(skb, RTA_VIA, alen + 2);
|
|
|
|
if (!nla)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2015-03-07 22:25:56 +00:00
|
|
|
if (table <= NEIGH_NR_TABLES)
|
|
|
|
family = table_to_family[table];
|
|
|
|
|
2015-03-04 01:13:56 +00:00
|
|
|
via = nla_data(nla);
|
|
|
|
via->rtvia_family = family;
|
|
|
|
memcpy(via->rtvia_addr, addr, alen);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:13:19 +00:00
|
|
|
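/* Encode a label stack as an array of 4-byte shim headers, setting the
 * bottom-of-stack bit only on the last entry.
 */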
int nla_put_labels(struct sk_buff *skb, int attrtype,
|
|
|
|
u8 labels, const u32 label[])
|
|
|
|
{
|
|
|
|
struct nlattr *nla;
|
|
|
|
struct mpls_shim_hdr *nla_label;
|
|
|
|
bool bos;
|
|
|
|
int i;
|
|
|
|
nla = nla_reserve(skb, attrtype, labels*4);
|
|
|
|
if (!nla)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
nla_label = nla_data(nla);
|
|
|
|
bos = true;
|
|
|
|
for (i = labels - 1; i >= 0; i--) {
|
|
|
|
nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
|
|
|
|
bos = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2015-07-21 08:43:52 +00:00
|
|
|
EXPORT_SYMBOL_GPL(nla_put_labels);
|
2015-03-04 01:13:19 +00:00
|
|
|
|
2017-05-27 22:19:29 +00:00
|
|
|
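/* Decode and validate a label stack attribute: the length must be a
 * multiple of 4 and fit in @max_labels, TTL and TC must be zero, the
 * bottom-of-stack bit must be set on exactly the last entry, and the
 * implicit NULL label is rejected.
 */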
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
|
|
|
|
u32 label[], struct netlink_ext_ack *extack)
|
2015-03-04 01:13:19 +00:00
|
|
|
{
|
|
|
|
unsigned len = nla_len(nla);
|
|
|
|
struct mpls_shim_hdr *nla_label;
|
2017-03-31 14:14:03 +00:00
|
|
|
u8 nla_labels;
|
2015-03-04 01:13:19 +00:00
|
|
|
bool bos;
|
|
|
|
int i;
|
|
|
|
|
2017-03-31 14:14:03 +00:00
|
|
|
/* len needs to be an even multiple of 4 (the label size). Number
|
|
|
|
* of labels is a u8 so check for overflow.
|
|
|
|
*/
|
2017-05-27 22:19:29 +00:00
|
|
|
if (len & 3 || len / 4 > 255) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla,
|
|
|
|
"Invalid length for labels attribute");
|
2015-03-04 01:13:19 +00:00
|
|
|
return -EINVAL;
|
2017-05-27 22:19:29 +00:00
|
|
|
}
|
2015-03-04 01:13:19 +00:00
|
|
|
|
|
|
|
/* Limit the number of new labels allowed */
|
|
|
|
nla_labels = len/4;
|
2017-05-27 22:19:29 +00:00
|
|
|
if (nla_labels > max_labels) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Too many labels");
|
2015-03-04 01:13:19 +00:00
|
|
|
return -EINVAL;
|
2017-05-27 22:19:29 +00:00
|
|
|
}
|
2015-03-04 01:13:19 +00:00
|
|
|
|
2017-03-31 14:14:03 +00:00
|
|
|
/* when label == NULL, caller wants number of labels */
|
|
|
|
if (!label)
|
|
|
|
goto out;
|
|
|
|
|
2015-03-04 01:13:19 +00:00
|
|
|
nla_label = nla_data(nla);
|
|
|
|
bos = true;
|
|
|
|
for (i = nla_labels - 1; i >= 0; i--, bos = false) {
|
|
|
|
struct mpls_entry_decoded dec;
|
|
|
|
dec = mpls_entry_decode(nla_label + i);
|
|
|
|
|
|
|
|
/* Ensure the bottom of stack flag is properly set
|
|
|
|
* and ttl and tc are both clear.
|
|
|
|
*/
|
2017-05-27 22:19:29 +00:00
|
|
|
if (dec.ttl) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla,
|
|
|
|
"TTL in label must be 0");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dec.tc) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla,
|
|
|
|
"Traffic class in label must be 0");
|
2015-03-04 01:13:19 +00:00
|
|
|
return -EINVAL;
|
2017-05-27 22:19:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (dec.bos != bos) {
|
|
|
|
NL_SET_BAD_ATTR(extack, nla);
|
|
|
|
if (bos) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"BOS bit must be set in first label");
|
|
|
|
} else {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"BOS bit can only be set in first label");
|
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2015-03-04 01:13:19 +00:00
|
|
|
|
2015-04-22 10:14:39 +00:00
|
|
|
switch (dec.label) {
|
2015-05-07 15:08:51 +00:00
|
|
|
case MPLS_LABEL_IMPLNULL:
|
2015-04-22 10:14:39 +00:00
|
|
|
/* RFC3032: This is a label that an LSR may
|
|
|
|
* assign and distribute, but which never
|
|
|
|
* actually appears in the encapsulation.
|
|
|
|
*/
|
2017-05-27 22:19:29 +00:00
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla,
|
|
|
|
"Implicit NULL Label (3) can not be used in encapsulation");
|
2015-04-22 10:14:39 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:13:19 +00:00
|
|
|
label[i] = dec.label;
|
|
|
|
}
|
2017-03-31 14:14:03 +00:00
|
|
|
out:
|
2015-03-04 01:13:19 +00:00
|
|
|
*labels = nla_labels;
|
|
|
|
return 0;
|
|
|
|
}
|
2015-07-21 08:43:52 +00:00
|
|
|
EXPORT_SYMBOL_GPL(nla_get_labels);
|
2015-03-04 01:13:19 +00:00
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
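/* Translate an rtnetlink route request into an mpls_route_config,
 * rejecting rtmsg fields and attributes that MPLS does not support.
 */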
static int rtm_to_route_config(struct sk_buff *skb,
|
|
|
|
struct nlmsghdr *nlh,
|
|
|
|
struct mpls_route_config *cfg,
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-03-04 01:13:56 +00:00
|
|
|
{
|
|
|
|
struct rtmsg *rtm;
|
|
|
|
struct nlattr *tb[RTA_MAX+1];
|
|
|
|
int index;
|
|
|
|
int err;
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
|
|
|
|
rtm_mpls_policy, extack);
|
2015-03-04 01:13:56 +00:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
rtm = nlmsg_data(nlh);
|
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
if (rtm->rtm_family != AF_MPLS) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid address family in rtmsg");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_dst_len != 20) {
|
|
|
|
NL_SET_ERR_MSG(extack, "rtm_dst_len must be 20 for MPLS");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_src_len != 0) {
|
|
|
|
NL_SET_ERR_MSG(extack, "rtm_src_len must be 0 for MPLS");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_tos != 0) {
|
|
|
|
NL_SET_ERR_MSG(extack, "rtm_tos must be 0 for MPLS");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_table != RT_TABLE_MAIN) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"MPLS only supports the main route table");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
2015-03-04 01:13:56 +00:00
|
|
|
/* Any value is acceptable for rtm_protocol */
|
|
|
|
|
|
|
|
/* As mpls uses destination specific addresses
|
|
|
|
* (or source specific address in the case of multicast)
|
|
|
|
* all addresses have universal scope.
|
|
|
|
*/
|
2017-05-27 22:19:31 +00:00
|
|
|
if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"Invalid route scope - MPLS only supports UNIVERSE");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_type != RTN_UNICAST) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"Invalid route type - MPLS only supports UNICAST");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
|
|
|
if (rtm->rtm_flags != 0) {
|
|
|
|
NL_SET_ERR_MSG(extack, "rtm_flags must be 0 for MPLS");
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
2015-03-04 01:13:56 +00:00
|
|
|
|
|
|
|
cfg->rc_label = LABEL_NOT_SPECIFIED;
|
|
|
|
cfg->rc_protocol = rtm->rtm_protocol;
|
2015-12-10 19:30:50 +00:00
|
|
|
cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
|
2017-03-10 20:43:24 +00:00
|
|
|
cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
|
2015-03-04 01:13:56 +00:00
|
|
|
cfg->rc_nlflags = nlh->nlmsg_flags;
|
|
|
|
cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
|
|
|
|
cfg->rc_nlinfo.nlh = nlh;
|
|
|
|
cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
|
|
|
|
|
|
|
|
for (index = 0; index <= RTA_MAX; index++) {
|
|
|
|
struct nlattr *nla = tb[index];
|
|
|
|
if (!nla)
|
|
|
|
continue;
|
|
|
|
|
2016-12-03 07:59:26 +00:00
|
|
|
switch (index) {
|
2015-03-04 01:13:56 +00:00
|
|
|
case RTA_OIF:
|
|
|
|
cfg->rc_ifindex = nla_get_u32(nla);
|
|
|
|
break;
|
|
|
|
case RTA_NEWDST:
|
|
|
|
if (nla_get_labels(nla, MAX_NEW_LABELS,
|
|
|
|
&cfg->rc_output_labels,
|
2017-05-27 22:19:31 +00:00
|
|
|
cfg->rc_output_label, extack))
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
|
|
|
break;
|
|
|
|
case RTA_DST:
|
|
|
|
{
|
2015-10-23 13:03:27 +00:00
|
|
|
u8 label_count;
|
2015-03-04 01:13:56 +00:00
|
|
|
if (nla_get_labels(nla, 1, &label_count,
|
2017-05-27 22:19:31 +00:00
|
|
|
&cfg->rc_label, extack))
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
|
|
|
|
2017-05-27 22:19:30 +00:00
|
|
|
if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
|
2018-02-08 06:34:24 +00:00
|
|
|
&cfg->rc_label, extack))
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
|
|
|
break;
|
|
|
|
}
|
2019-02-26 17:00:04 +00:00
|
|
|
case RTA_GATEWAY:
|
|
|
|
NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
|
|
|
|
goto errout;
|
2015-03-04 01:13:56 +00:00
|
|
|
case RTA_VIA:
|
|
|
|
{
|
2015-10-23 13:03:27 +00:00
|
|
|
if (nla_get_via(nla, &cfg->rc_via_alen,
|
2017-05-27 22:19:31 +00:00
|
|
|
&cfg->rc_via_table, cfg->rc_via,
|
|
|
|
extack))
|
2015-03-04 01:13:56 +00:00
|
|
|
goto errout;
|
2015-10-23 13:03:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case RTA_MULTIPATH:
|
|
|
|
{
|
|
|
|
cfg->rc_mp = nla_data(nla);
|
|
|
|
cfg->rc_mp_len = nla_len(nla);
|
2015-03-04 01:13:56 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-03-10 20:43:24 +00:00
|
|
|
case RTA_TTL_PROPAGATE:
|
|
|
|
{
|
|
|
|
u8 ttl_propagate = nla_get_u8(nla);
|
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
if (ttl_propagate > 1) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla,
|
|
|
|
"RTA_TTL_PROPAGATE can only be 0 or 1");
|
2017-03-10 20:43:24 +00:00
|
|
|
goto errout;
|
2017-05-27 22:19:31 +00:00
|
|
|
}
|
2017-03-10 20:43:24 +00:00
|
|
|
cfg->rc_ttl_propagate = ttl_propagate ?
|
|
|
|
MPLS_TTL_PROP_ENABLED :
|
|
|
|
MPLS_TTL_PROP_DISABLED;
|
|
|
|
break;
|
|
|
|
}
|
2015-03-04 01:13:56 +00:00
|
|
|
default:
|
2017-05-27 22:19:31 +00:00
|
|
|
NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute");
|
2015-03-04 01:13:56 +00:00
|
|
|
/* Unsupported attribute */
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
errout:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-16 16:48:24 +00:00
|
|
|
static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-03-04 01:13:56 +00:00
|
|
|
{
|
2017-03-31 14:14:03 +00:00
|
|
|
struct mpls_route_config *cfg;
|
2015-03-04 01:13:56 +00:00
|
|
|
int err;
|
|
|
|
|
2017-03-31 14:14:03 +00:00
|
|
|
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
|
|
|
|
if (!cfg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
err = rtm_to_route_config(skb, nlh, cfg, extack);
|
2015-03-04 01:13:56 +00:00
|
|
|
if (err < 0)
|
2017-03-31 14:14:03 +00:00
|
|
|
goto out;
|
2015-03-04 01:13:56 +00:00
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
err = mpls_route_del(cfg, extack);
|
2017-03-31 14:14:03 +00:00
|
|
|
out:
|
|
|
|
kfree(cfg);
|
|
|
|
|
|
|
|
return err;
|
2015-03-04 01:13:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-16 16:48:24 +00:00
|
|
|
static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-03-04 01:13:56 +00:00
|
|
|
{
|
2017-03-31 14:14:03 +00:00
|
|
|
struct mpls_route_config *cfg;
|
2015-03-04 01:13:56 +00:00
|
|
|
int err;
|
|
|
|
|
2017-03-31 14:14:03 +00:00
|
|
|
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
|
|
|
|
if (!cfg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
err = rtm_to_route_config(skb, nlh, cfg, extack);
|
2015-03-04 01:13:56 +00:00
|
|
|
if (err < 0)
|
2017-03-31 14:14:03 +00:00
|
|
|
goto out;
|
2015-03-04 01:13:56 +00:00
|
|
|
|
2017-05-27 22:19:31 +00:00
|
|
|
err = mpls_route_add(cfg, extack);
|
2017-03-31 14:14:03 +00:00
|
|
|
out:
|
|
|
|
kfree(cfg);
|
|
|
|
|
|
|
|
return err;
|
2015-03-04 01:13:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
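/* Fill one RTM_NEWROUTE message for @rt.  A single-nexthop route is
 * encoded with top-level attributes; a multipath route uses a nested
 * RTA_MULTIPATH attribute with one rtnexthop per usable nexthop.
 */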
static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
|
|
|
|
u32 label, struct mpls_route *rt, int flags)
|
|
|
|
{
|
2015-03-07 22:21:56 +00:00
|
|
|
struct net_device *dev;
|
2015-03-04 01:13:56 +00:00
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct rtmsg *rtm;
|
|
|
|
|
|
|
|
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
|
|
|
|
if (nlh == NULL)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
rtm = nlmsg_data(nlh);
|
|
|
|
rtm->rtm_family = AF_MPLS;
|
|
|
|
rtm->rtm_dst_len = 20;
|
|
|
|
rtm->rtm_src_len = 0;
|
|
|
|
rtm->rtm_tos = 0;
|
|
|
|
rtm->rtm_table = RT_TABLE_MAIN;
|
|
|
|
rtm->rtm_protocol = rt->rt_protocol;
|
|
|
|
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
|
|
|
|
rtm->rtm_type = RTN_UNICAST;
|
|
|
|
rtm->rtm_flags = 0;
|
|
|
|
|
|
|
|
if (nla_put_labels(skb, RTA_DST, 1, &label))
|
|
|
|
goto nla_put_failure;
|
2017-03-10 20:43:24 +00:00
|
|
|
|
|
|
|
if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
|
|
|
|
bool ttl_propagate =
|
|
|
|
rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
|
|
|
|
|
|
|
|
if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
|
|
|
|
ttl_propagate))
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
2015-10-23 13:03:27 +00:00
|
|
|
if (rt->rt_nhn == 1) {
|
2015-10-27 00:37:36 +00:00
|
|
|
const struct mpls_nh *nh = rt->rt_nh;
|
2015-10-23 13:03:27 +00:00
|
|
|
|
|
|
|
if (nh->nh_labels &&
|
|
|
|
nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
|
|
|
|
nh->nh_label))
|
|
|
|
goto nla_put_failure;
|
2015-12-10 19:30:50 +00:00
|
|
|
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
|
2015-12-10 19:30:49 +00:00
|
|
|
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
|
2015-10-23 13:03:27 +00:00
|
|
|
nh->nh_via_alen))
|
|
|
|
goto nla_put_failure;
|
2021-11-29 06:15:06 +00:00
|
|
|
dev = nh->nh_dev;
|
2015-10-23 13:03:27 +00:00
|
|
|
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
|
|
|
|
goto nla_put_failure;
|
2015-12-02 06:18:11 +00:00
|
|
|
if (nh->nh_flags & RTNH_F_LINKDOWN)
|
|
|
|
rtm->rtm_flags |= RTNH_F_LINKDOWN;
|
|
|
|
if (nh->nh_flags & RTNH_F_DEAD)
|
|
|
|
rtm->rtm_flags |= RTNH_F_DEAD;
|
2015-10-23 13:03:27 +00:00
|
|
|
} else {
|
|
|
|
struct rtnexthop *rtnh;
|
|
|
|
struct nlattr *mp;
|
2017-03-31 14:14:00 +00:00
|
|
|
u8 linkdown = 0;
|
|
|
|
u8 dead = 0;
|
2015-10-23 13:03:27 +00:00
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
|
2015-10-23 13:03:27 +00:00
|
|
|
if (!mp)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
for_nexthops(rt) {
|
2021-11-29 06:15:06 +00:00
|
|
|
dev = nh->nh_dev;
|
2017-03-24 22:21:56 +00:00
|
|
|
if (!dev)
|
|
|
|
continue;
|
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
|
|
|
|
if (!rtnh)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
2017-03-24 22:21:56 +00:00
|
|
|
rtnh->rtnh_ifindex = dev->ifindex;
|
2015-12-02 06:18:11 +00:00
|
|
|
if (nh->nh_flags & RTNH_F_LINKDOWN) {
|
|
|
|
rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
|
|
|
|
linkdown++;
|
|
|
|
}
|
|
|
|
if (nh->nh_flags & RTNH_F_DEAD) {
|
|
|
|
rtnh->rtnh_flags |= RTNH_F_DEAD;
|
|
|
|
dead++;
|
|
|
|
}
|
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
|
|
|
|
nh->nh_labels,
|
|
|
|
nh->nh_label))
|
|
|
|
goto nla_put_failure;
|
2015-12-10 19:30:51 +00:00
|
|
|
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
|
|
|
|
nla_put_via(skb, nh->nh_via_table,
|
2015-10-27 00:37:36 +00:00
|
|
|
mpls_nh_via(rt, nh),
|
2015-10-23 13:03:27 +00:00
|
|
|
nh->nh_via_alen))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
/* length of rtnetlink header + attributes */
|
|
|
|
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
|
|
|
|
} endfor_nexthops(rt);
|
|
|
|
|
2015-12-02 06:18:11 +00:00
|
|
|
if (linkdown == rt->rt_nhn)
|
|
|
|
rtm->rtm_flags |= RTNH_F_LINKDOWN;
|
|
|
|
if (dead == rt->rt_nhn)
|
|
|
|
rtm->rtm_flags |= RTNH_F_DEAD;
|
|
|
|
|
2015-10-23 13:03:27 +00:00
|
|
|
nla_nest_end(skb, mp);
|
|
|
|
}
|
2015-03-04 01:13:56 +00:00
|
|
|
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
nlmsg_cancel(skb, nlh);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
2018-10-09 18:10:43 +00:00
|
|
|
#if IS_ENABLED(CONFIG_INET)
|
2018-10-16 01:56:42 +00:00
|
|
|
static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
|
|
|
|
struct fib_dump_filter *filter,
|
2018-10-16 01:56:48 +00:00
|
|
|
struct netlink_callback *cb)
|
2018-10-09 18:10:43 +00:00
|
|
|
{
|
2018-10-16 01:56:48 +00:00
|
|
|
return ip_valid_fib_dump_req(net, nlh, filter, cb);
|
2018-10-09 18:10:43 +00:00
|
|
|
}
|
|
|
|
#else
|
2018-10-16 01:56:42 +00:00
|
|
|
static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
|
|
|
|
struct fib_dump_filter *filter,
|
2018-10-16 01:56:48 +00:00
|
|
|
struct netlink_callback *cb)
|
2018-10-09 18:10:43 +00:00
|
|
|
{
|
2018-10-16 01:56:48 +00:00
|
|
|
struct netlink_ext_ack *extack = cb->extack;
|
2018-10-16 01:56:49 +00:00
|
|
|
struct nlattr *tb[RTA_MAX + 1];
|
2018-10-09 18:10:43 +00:00
|
|
|
struct rtmsg *rtm;
|
2018-10-16 01:56:49 +00:00
|
|
|
int err, i;
|
2018-10-09 18:10:43 +00:00
|
|
|
|
|
|
|
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtm = nlmsg_data(nlh);
|
|
|
|
if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
|
2018-10-16 01:56:49 +00:00
|
|
|
rtm->rtm_table || rtm->rtm_scope || rtm->rtm_type ||
|
|
|
|
rtm->rtm_flags) {
|
2018-10-09 18:10:43 +00:00
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2018-10-16 01:56:49 +00:00
|
|
|
if (rtm->rtm_protocol) {
|
|
|
|
filter->protocol = rtm->rtm_protocol;
|
|
|
|
filter->filter_set = 1;
|
|
|
|
cb->answer_flags = NLM_F_DUMP_FILTERED;
|
|
|
|
}
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
|
|
|
|
rtm_mpls_policy, extack);
|
2018-10-16 01:56:49 +00:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
for (i = 0; i <= RTA_MAX; ++i) {
|
|
|
|
int ifindex;
|
|
|
|
|
|
|
|
if (i == RTA_OIF) {
|
|
|
|
ifindex = nla_get_u32(tb[i]);
|
|
|
|
filter->dev = __dev_get_by_index(net, ifindex);
|
|
|
|
if (!filter->dev)
|
|
|
|
return -ENODEV;
|
|
|
|
filter->filter_set = 1;
|
|
|
|
} else if (tb[i]) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2018-10-09 18:10:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
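
The strict dump validation above only admits a protocol filter in the rtmsg header and a single RTA_OIF attribute. As a rough illustration, here is a hypothetical userspace sketch (not from the kernel tree) of a request that passes mpls_valid_fib_dump_req(): it opts in to strict checking via NETLINK_GET_STRICT_CHK and dumps only static MPLS routes leaving one interface. The ifindex, the RTPROT_STATIC filter and the fallback #defines are illustrative assumptions.

```
/* Hypothetical userspace sketch: a strictly checked, filtered MPLS route dump.
 * Only rtm_protocol and RTA_OIF survive mpls_valid_fib_dump_req().
 */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_MPLS
#define AF_MPLS 28
#endif
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
#ifndef NETLINK_GET_STRICT_CHK
#define NETLINK_GET_STRICT_CHK 12
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		struct rtattr rta;
		uint32_t oif;
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;
	/* Opt in to strict checking so the kernel runs the validator above. */
	setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &one, sizeof(one));

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_MPLS;		/* all other rtmsg fields stay zero */
	req.rtm.rtm_protocol = RTPROT_STATIC;	/* optional protocol filter */
	req.rta.rta_type = RTA_OIF;		/* the only attribute accepted */
	req.rta.rta_len = RTA_LENGTH(sizeof(uint32_t));
	req.oif = 1;				/* assumed ifindex, e.g. loopback */

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("sendto");
	/* A real client would now recv() and walk the NLM_F_MULTI replies. */
	close(fd);
	return 0;
}
```

When such a filter is set, the dump below advertises it back to userspace through NLM_F_DUMP_FILTERED in cb->answer_flags.
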
|
|
|
|
|
2018-10-16 01:56:45 +00:00
|
|
|
static bool mpls_rt_uses_dev(struct mpls_route *rt,
|
|
|
|
const struct net_device *dev)
|
|
|
|
{
|
|
|
|
if (rt->rt_nhn == 1) {
|
|
|
|
struct mpls_nh *nh = rt->rt_nh;
|
|
|
|
|
2021-11-29 06:15:06 +00:00
|
|
|
if (nh->nh_dev == dev)
|
2018-10-16 01:56:45 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
for_nexthops(rt) {
|
2021-11-29 06:15:06 +00:00
|
|
|
if (nh->nh_dev == dev)
|
2018-10-16 01:56:45 +00:00
|
|
|
return true;
|
|
|
|
} endfor_nexthops(rt);
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:13:56 +00:00
|
|
|
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
|
|
|
|
{
|
2018-10-08 03:16:35 +00:00
|
|
|
const struct nlmsghdr *nlh = cb->nlh;
|
2015-03-04 01:13:56 +00:00
|
|
|
struct net *net = sock_net(skb->sk);
|
2015-03-07 22:21:56 +00:00
|
|
|
struct mpls_route __rcu **platform_label;
|
2024-02-22 10:50:17 +00:00
|
|
|
struct fib_dump_filter filter = {
|
|
|
|
.rtnl_held = true,
|
|
|
|
};
|
2018-10-16 01:56:45 +00:00
|
|
|
unsigned int flags = NLM_F_MULTI;
|
2015-03-07 22:21:56 +00:00
|
|
|
size_t platform_labels;
|
2015-03-04 01:13:56 +00:00
|
|
|
unsigned int index;
|
|
|
|
|
|
|
|
ASSERT_RTNL();
|
|
|
|
|
2018-10-08 03:16:35 +00:00
|
|
|
if (cb->strict_check) {
|
2018-10-16 01:56:42 +00:00
|
|
|
int err;
|
2018-10-08 03:16:35 +00:00
|
|
|
|
2018-10-16 01:56:48 +00:00
|
|
|
err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
|
2018-10-08 03:16:35 +00:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
2018-10-16 01:56:45 +00:00
|
|
|
|
|
|
|
/* for MPLS, there is only 1 table with fixed type and flags.
|
|
|
|
* If either are set in the filter then return nothing.
|
|
|
|
*/
|
|
|
|
if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
|
|
|
|
(filter.rt_type && filter.rt_type != RTN_UNICAST) ||
|
|
|
|
filter.flags)
|
|
|
|
return skb->len;
|
2018-10-08 03:16:35 +00:00
|
|
|
}
|
|
|
|
|
2015-03-04 01:13:56 +00:00
|
|
|
index = cb->args[0];
|
2015-08-03 16:50:04 +00:00
|
|
|
if (index < MPLS_LABEL_FIRST_UNRESERVED)
|
|
|
|
index = MPLS_LABEL_FIRST_UNRESERVED;
|
2015-03-04 01:13:56 +00:00
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
platform_label = rtnl_dereference(net->mpls.platform_label);
|
|
|
|
platform_labels = net->mpls.platform_labels;
|
2018-10-16 01:56:45 +00:00
|
|
|
|
|
|
|
if (filter.filter_set)
|
|
|
|
flags |= NLM_F_DUMP_FILTERED;
|
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
for (; index < platform_labels; index++) {
|
2015-03-04 01:13:56 +00:00
|
|
|
struct mpls_route *rt;
|
2018-10-16 01:56:45 +00:00
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
rt = rtnl_dereference(platform_label[index]);
|
2015-03-04 01:13:56 +00:00
|
|
|
if (!rt)
|
|
|
|
continue;
|
|
|
|
|
2018-10-16 01:56:45 +00:00
|
|
|
if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
|
|
|
|
(filter.protocol && rt->rt_protocol != filter.protocol))
|
|
|
|
continue;
|
|
|
|
|
2015-03-04 01:13:56 +00:00
|
|
|
if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
|
|
|
|
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
|
2018-10-16 01:56:45 +00:00
|
|
|
index, rt, flags) < 0)
|
2015-03-04 01:13:56 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
cb->args[0] = index;
|
|
|
|
|
|
|
|
return skb->len;
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:14:31 +00:00
|
|
|
static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
|
|
|
|
{
|
|
|
|
size_t payload =
|
|
|
|
NLMSG_ALIGN(sizeof(struct rtmsg))
|
2017-03-10 20:43:24 +00:00
|
|
|
+ nla_total_size(4) /* RTA_DST */
|
|
|
|
+ nla_total_size(1); /* RTA_TTL_PROPAGATE */
|
2015-10-23 13:03:27 +00:00
|
|
|
|
|
|
|
if (rt->rt_nhn == 1) {
|
|
|
|
struct mpls_nh *nh = rt->rt_nh;
|
|
|
|
|
|
|
|
if (nh->nh_dev)
|
|
|
|
payload += nla_total_size(4); /* RTA_OIF */
|
2015-12-10 19:30:50 +00:00
|
|
|
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
|
2015-12-10 19:30:49 +00:00
|
|
|
payload += nla_total_size(2 + nh->nh_via_alen);
|
2015-10-23 13:03:27 +00:00
|
|
|
if (nh->nh_labels) /* RTA_NEWDST */
|
|
|
|
payload += nla_total_size(nh->nh_labels * 4);
|
|
|
|
} else {
|
|
|
|
/* each nexthop is packed in an attribute */
|
|
|
|
size_t nhsize = 0;
|
|
|
|
|
|
|
|
for_nexthops(rt) {
|
2021-11-29 06:15:06 +00:00
|
|
|
if (!nh->nh_dev)
|
2017-03-28 22:19:49 +00:00
|
|
|
continue;
|
2015-10-23 13:03:27 +00:00
|
|
|
nhsize += nla_total_size(sizeof(struct rtnexthop));
|
2015-12-10 19:30:51 +00:00
|
|
|
/* RTA_VIA */
|
|
|
|
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
|
|
|
|
nhsize += nla_total_size(2 + nh->nh_via_alen);
|
2015-10-23 13:03:27 +00:00
|
|
|
if (nh->nh_labels)
|
|
|
|
nhsize += nla_total_size(nh->nh_labels * 4);
|
|
|
|
} endfor_nexthops(rt);
|
|
|
|
/* nested attribute */
|
|
|
|
payload += nla_total_size(nhsize);
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:14:31 +00:00
|
|
|
return payload;
|
|
|
|
}
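
For orientation, a worked instance of the estimate above under the usual netlink alignment rules (NLA_HDRLEN == 4, everything padded to 4 bytes); the label and via-address counts are made up for the example.

```
/* One nexthop, two outgoing labels, 4-byte (IPv4) via address:
 *
 *   NLMSG_ALIGN(sizeof(struct rtmsg))  = 12   header
 *   + nla_total_size(4)                =  8   RTA_DST
 *   + nla_total_size(1)                =  8   RTA_TTL_PROPAGATE
 *   + nla_total_size(4)                =  8   RTA_OIF
 *   + nla_total_size(2 + 4)            = 12   RTA_VIA (family + address)
 *   + nla_total_size(2 * 4)            = 12   RTA_NEWDST (two labels)
 *                                       ----
 *                                        60 bytes
 */
```
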
|
|
|
|
|
|
|
|
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
|
|
|
|
struct nlmsghdr *nlh, struct net *net, u32 portid,
|
|
|
|
unsigned int nlm_flags)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
u32 seq = nlh ? nlh->nlmsg_seq : 0;
|
|
|
|
int err = -ENOBUFS;
|
|
|
|
|
|
|
|
skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
|
|
|
|
if (skb == NULL)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
|
|
|
|
if (err < 0) {
|
|
|
|
/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
|
|
|
|
WARN_ON(err == -EMSGSIZE);
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
|
|
|
|
|
|
|
|
return;
|
|
|
|
errout:
|
2024-08-22 04:32:52 +00:00
|
|
|
rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
|
2015-03-04 01:14:31 +00:00
|
|
|
}
|
|
|
|
|
2019-01-18 18:46:25 +00:00
|
|
|
static int mpls_valid_getroute_req(struct sk_buff *skb,
|
|
|
|
const struct nlmsghdr *nlh,
|
|
|
|
struct nlattr **tb,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct rtmsg *rtm;
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"Invalid header for get route request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!netlink_strict_get_check(skb))
|
2019-04-26 12:07:28 +00:00
|
|
|
return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
|
|
|
|
rtm_mpls_policy, extack);
|
2019-01-18 18:46:25 +00:00
|
|
|
|
|
|
|
rtm = nlmsg_data(nlh);
|
|
|
|
if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
|
|
|
|
rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
|
|
|
|
rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"Invalid flags for get route request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
|
|
|
|
rtm_mpls_policy, extack);
|
2019-01-18 18:46:25 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i <= RTA_MAX; i++) {
|
|
|
|
if (!tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
case RTA_DST:
|
|
|
|
case RTA_NEWDST:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
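
The get-route validator above accepts only RTA_DST and RTA_NEWDST, and requires rtm_dst_len to be 20 once a label is supplied. A hypothetical userspace sketch (not part of the tree) of a matching request: it asks for the route installed for label 100 and sets RTM_F_FIB_MATCH so mpls_getroute() below returns the FIB entry instead of simulating forwarding. The label value is an assumption; the shim encoding comes from <linux/mpls.h>.

```
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/mpls.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_MPLS
#define AF_MPLS 28
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		struct rtattr rta;
		uint32_t label;		/* one MPLS label stack entry */
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_MPLS;
	req.rtm.rtm_dst_len = 20;		/* MPLS labels are 20 bits wide */
	req.rtm.rtm_flags = RTM_F_FIB_MATCH;	/* return the FIB entry itself */
	req.rta.rta_type = RTA_DST;
	req.rta.rta_len = RTA_LENGTH(sizeof(uint32_t));
	/* Label 100 with the bottom-of-stack bit set, as nla_get_labels() expects. */
	req.label = htonl((100u << MPLS_LS_LABEL_SHIFT) | (1u << MPLS_LS_S_SHIFT));

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("sendto");
	/* A real client would recv() the RTM_NEWROUTE reply (or an error) here. */
	close(fd);
	return 0;
}
```
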
|
|
|
|
|
2017-07-03 22:31:21 +00:00
|
|
|
static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct net *net = sock_net(in_skb->sk);
|
|
|
|
u32 portid = NETLINK_CB(in_skb).portid;
|
2017-07-07 18:21:49 +00:00
|
|
|
u32 in_label = LABEL_NOT_SPECIFIED;
|
2017-07-03 22:31:21 +00:00
|
|
|
struct nlattr *tb[RTA_MAX + 1];
|
|
|
|
u32 labels[MAX_NEW_LABELS];
|
|
|
|
struct mpls_shim_hdr *hdr;
|
|
|
|
unsigned int hdr_size = 0;
|
2021-11-29 06:23:16 +00:00
|
|
|
const struct mpls_nh *nh;
|
2017-07-03 22:31:21 +00:00
|
|
|
struct net_device *dev;
|
|
|
|
struct mpls_route *rt;
|
|
|
|
struct rtmsg *rtm, *r;
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
u8 n_labels;
|
2017-07-07 18:21:49 +00:00
|
|
|
int err;
|
2017-07-03 22:31:21 +00:00
|
|
|
|
2019-01-18 18:46:25 +00:00
|
|
|
err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
|
2017-07-03 22:31:21 +00:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
rtm = nlmsg_data(in_nlh);
|
|
|
|
|
|
|
|
if (tb[RTA_DST]) {
|
|
|
|
u8 label_count;
|
|
|
|
|
|
|
|
if (nla_get_labels(tb[RTA_DST], 1, &label_count,
|
2017-07-07 18:21:49 +00:00
|
|
|
&in_label, extack)) {
|
|
|
|
err = -EINVAL;
|
2017-07-03 22:31:21 +00:00
|
|
|
goto errout;
|
2017-07-07 18:21:49 +00:00
|
|
|
}
|
2017-07-03 22:31:21 +00:00
|
|
|
|
2018-02-08 06:34:24 +00:00
|
|
|
if (!mpls_label_ok(net, &in_label, extack)) {
|
2017-07-07 18:21:49 +00:00
|
|
|
err = -EINVAL;
|
2017-07-03 22:31:21 +00:00
|
|
|
goto errout;
|
2017-07-07 18:21:49 +00:00
|
|
|
}
|
2017-07-03 22:31:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rt = mpls_route_input_rcu(net, in_label);
|
|
|
|
if (!rt) {
|
|
|
|
err = -ENETUNREACH;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
|
|
|
|
skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
|
|
|
|
if (!skb) {
|
|
|
|
err = -ENOBUFS;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = mpls_dump_route(skb, portid, in_nlh->nlmsg_seq,
|
|
|
|
RTM_NEWROUTE, in_label, rt, 0);
|
|
|
|
if (err < 0) {
|
|
|
|
/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
|
|
|
|
WARN_ON(err == -EMSGSIZE);
|
|
|
|
goto errout_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rtnl_unicast(skb, net, portid);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tb[RTA_NEWDST]) {
|
|
|
|
if (nla_get_labels(tb[RTA_NEWDST], MAX_NEW_LABELS, &n_labels,
|
|
|
|
labels, extack) != 0) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
hdr_size = n_labels * sizeof(struct mpls_shim_hdr);
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
|
|
|
|
if (!skb) {
|
|
|
|
err = -ENOBUFS;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb->protocol = htons(ETH_P_MPLS_UC);
|
|
|
|
|
|
|
|
if (hdr_size) {
|
|
|
|
bool bos;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (skb_cow(skb, hdr_size)) {
|
|
|
|
err = -ENOBUFS;
|
|
|
|
goto errout_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb_reserve(skb, hdr_size);
|
|
|
|
skb_push(skb, hdr_size);
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
|
|
|
|
/* Push new labels */
|
|
|
|
hdr = mpls_hdr(skb);
|
|
|
|
bos = true;
|
|
|
|
for (i = n_labels - 1; i >= 0; i--) {
|
|
|
|
hdr[i] = mpls_entry_encode(labels[i],
|
|
|
|
1, 0, bos);
|
|
|
|
bos = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nh = mpls_select_multipath(rt, skb);
|
|
|
|
if (!nh) {
|
|
|
|
err = -ENETUNREACH;
|
|
|
|
goto errout_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hdr_size) {
|
|
|
|
skb_pull(skb, hdr_size);
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
nlh = nlmsg_put(skb, portid, in_nlh->nlmsg_seq,
|
|
|
|
RTM_NEWROUTE, sizeof(*r), 0);
|
|
|
|
if (!nlh) {
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto errout_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
r = nlmsg_data(nlh);
|
|
|
|
r->rtm_family = AF_MPLS;
|
|
|
|
r->rtm_dst_len = 20;
|
|
|
|
r->rtm_src_len = 0;
|
|
|
|
r->rtm_table = RT_TABLE_MAIN;
|
|
|
|
r->rtm_type = RTN_UNICAST;
|
|
|
|
r->rtm_scope = RT_SCOPE_UNIVERSE;
|
|
|
|
r->rtm_protocol = rt->rt_protocol;
|
|
|
|
r->rtm_flags = 0;
|
|
|
|
|
|
|
|
if (nla_put_labels(skb, RTA_DST, 1, &in_label))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nh->nh_labels &&
|
|
|
|
nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
|
|
|
|
nh->nh_label))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
|
|
|
|
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
|
|
|
|
nh->nh_via_alen))
|
|
|
|
goto nla_put_failure;
|
2021-11-29 06:15:06 +00:00
|
|
|
dev = nh->nh_dev;
|
2017-07-03 22:31:21 +00:00
|
|
|
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
|
|
|
|
err = rtnl_unicast(skb, net, portid);
|
|
|
|
errout:
|
|
|
|
return err;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
nlmsg_cancel(skb, nlh);
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
errout_free:
|
|
|
|
kfree_skb(skb);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-03-04 01:11:20 +00:00
|
|
|
static int resize_platform_label_table(struct net *net, size_t limit)
|
|
|
|
{
|
|
|
|
size_t size = sizeof(struct mpls_route *) * limit;
|
|
|
|
size_t old_limit;
|
|
|
|
size_t cp_size;
|
|
|
|
struct mpls_route __rcu **labels = NULL, **old;
|
|
|
|
struct mpls_route *rt0 = NULL, *rt2 = NULL;
|
|
|
|
unsigned index;
|
|
|
|
|
|
|
|
if (size) {
|
2017-05-08 22:57:27 +00:00
|
|
|
labels = kvzalloc(size, GFP_KERNEL);
|
2015-03-04 01:11:20 +00:00
|
|
|
if (!labels)
|
|
|
|
goto nolabels;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* In case the predefined labels need to be populated */
|
2015-05-07 15:08:51 +00:00
|
|
|
if (limit > MPLS_LABEL_IPV4NULL) {
|
2015-03-04 01:11:20 +00:00
|
|
|
struct net_device *lo = net->loopback_dev;
|
2017-03-31 14:14:03 +00:00
|
|
|
rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
|
2017-03-31 14:14:02 +00:00
|
|
|
if (IS_ERR(rt0))
|
2015-03-04 01:11:20 +00:00
|
|
|
goto nort0;
|
2021-11-29 06:15:06 +00:00
|
|
|
rt0->rt_nh->nh_dev = lo;
|
2015-03-04 01:11:20 +00:00
|
|
|
rt0->rt_protocol = RTPROT_KERNEL;
|
2015-08-06 10:04:56 +00:00
|
|
|
rt0->rt_payload_type = MPT_IPV4;
|
2017-03-10 20:43:24 +00:00
|
|
|
rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
|
2015-10-23 13:03:27 +00:00
|
|
|
rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
|
2015-10-27 00:37:35 +00:00
|
|
|
rt0->rt_nh->nh_via_alen = lo->addr_len;
|
2015-10-27 00:37:36 +00:00
|
|
|
memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
|
|
|
|
lo->addr_len);
|
2015-03-04 01:11:20 +00:00
|
|
|
}
|
2015-05-07 15:08:51 +00:00
|
|
|
if (limit > MPLS_LABEL_IPV6NULL) {
|
2015-03-04 01:11:20 +00:00
|
|
|
struct net_device *lo = net->loopback_dev;
|
2017-03-31 14:14:03 +00:00
|
|
|
rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
|
2017-03-31 14:14:02 +00:00
|
|
|
if (IS_ERR(rt2))
|
2015-03-04 01:11:20 +00:00
|
|
|
goto nort2;
|
2021-11-29 06:15:06 +00:00
|
|
|
rt2->rt_nh->nh_dev = lo;
|
2015-03-04 01:11:20 +00:00
|
|
|
rt2->rt_protocol = RTPROT_KERNEL;
|
2015-08-06 10:04:56 +00:00
|
|
|
rt2->rt_payload_type = MPT_IPV6;
|
2017-03-24 01:02:27 +00:00
|
|
|
rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
|
2015-10-23 13:03:27 +00:00
|
|
|
rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
|
2015-10-27 00:37:35 +00:00
|
|
|
rt2->rt_nh->nh_via_alen = lo->addr_len;
|
2015-10-27 00:37:36 +00:00
|
|
|
memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
|
|
|
|
lo->addr_len);
|
2015-03-04 01:11:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rtnl_lock();
|
|
|
|
/* Remember the original table */
|
2015-03-07 22:21:56 +00:00
|
|
|
old = rtnl_dereference(net->mpls.platform_label);
|
2015-03-04 01:11:20 +00:00
|
|
|
old_limit = net->mpls.platform_labels;
|
|
|
|
|
|
|
|
/* Free any labels beyond the new table */
|
|
|
|
for (index = limit; index < old_limit; index++)
|
2015-10-23 13:03:27 +00:00
|
|
|
mpls_route_update(net, index, NULL, NULL);
|
2015-03-04 01:11:20 +00:00
|
|
|
|
|
|
|
/* Copy over the old labels */
|
|
|
|
cp_size = size;
|
|
|
|
if (old_limit < limit)
|
|
|
|
cp_size = old_limit * sizeof(struct mpls_route *);
|
|
|
|
|
|
|
|
memcpy(labels, old, cp_size);
|
|
|
|
|
|
|
|
/* If needed set the predefined labels */
|
2015-05-07 15:08:51 +00:00
|
|
|
if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
|
|
|
|
(limit > MPLS_LABEL_IPV6NULL)) {
|
|
|
|
RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
|
2015-03-04 01:11:20 +00:00
|
|
|
rt2 = NULL;
|
|
|
|
}
|
|
|
|
|
2015-05-07 15:08:51 +00:00
|
|
|
if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
|
|
|
|
(limit > MPLS_LABEL_IPV4NULL)) {
|
|
|
|
RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
|
2015-03-04 01:11:20 +00:00
|
|
|
rt0 = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the global pointers */
|
|
|
|
net->mpls.platform_labels = limit;
|
2015-03-07 22:21:56 +00:00
|
|
|
rcu_assign_pointer(net->mpls.platform_label, labels);
|
2015-03-04 01:11:20 +00:00
|
|
|
|
|
|
|
rtnl_unlock();
|
|
|
|
|
|
|
|
mpls_rt_free(rt2);
|
|
|
|
mpls_rt_free(rt0);
|
|
|
|
|
|
|
|
if (old) {
|
|
|
|
synchronize_rcu();
|
|
|
|
kvfree(old);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nort2:
|
|
|
|
mpls_rt_free(rt0);
|
|
|
|
nort0:
|
|
|
|
kvfree(labels);
|
|
|
|
nolabels:
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
sysctl: treewide: constify the ctl_table argument of proc_handlers
const qualify the struct ctl_table argument in the proc_handler function
signatures. This is a prerequisite to moving the static ctl_table
structs into .rodata data which will ensure that proc_handler function
pointers cannot be modified.
This patch has been generated by the following coccinelle script:
```
virtual patch
@r1@
identifier ctl, write, buffer, lenp, ppos;
identifier func !~ "appldata_(timer|interval)_handler|sched_(rt|rr)_handler|rds_tcp_skbuf_handler|proc_sctp_do_(hmac_alg|rto_min|rto_max|udp_port|alpha_beta|auth|probe_interval)";
@@
int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int write, void *buffer, size_t *lenp, loff_t *ppos);
@r2@
identifier func, ctl, write, buffer, lenp, ppos;
@@
int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int write, void *buffer, size_t *lenp, loff_t *ppos)
{ ... }
@r3@
identifier func;
@@
int func(
- struct ctl_table *
+ const struct ctl_table *
,int , void *, size_t *, loff_t *);
@r4@
identifier func, ctl;
@@
int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int , void *, size_t *, loff_t *);
@r5@
identifier func, write, buffer, lenp, ppos;
@@
int func(
- struct ctl_table *
+ const struct ctl_table *
,int write, void *buffer, size_t *lenp, loff_t *ppos);
```
* Code formatting was adjusted in xfs_sysctl.c to comply with code
conventions. The xfs_stats_clear_proc_handler,
xfs_panic_mask_proc_handler and xfs_deprecated_dointvec_minmax where
adjusted.
* The ctl_table argument in proc_watchdog_common was const qualified.
This is called from a proc_handler itself and is calling back into
another proc_handler, making it necessary to change it as part of the
proc_handler migration.
Co-developed-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Co-developed-by: Joel Granados <j.granados@samsung.com>
Signed-off-by: Joel Granados <j.granados@samsung.com>
2024-07-24 18:59:29 +00:00
|
|
|
static int mpls_platform_labels(const struct ctl_table *table, int write,
|
2020-04-24 06:43:38 +00:00
|
|
|
void *buffer, size_t *lenp, loff_t *ppos)
|
2015-03-04 01:11:20 +00:00
|
|
|
{
|
|
|
|
struct net *net = table->data;
|
|
|
|
int platform_labels = net->mpls.platform_labels;
|
|
|
|
int ret;
|
|
|
|
struct ctl_table tmp = {
|
|
|
|
.procname = table->procname,
|
|
|
|
.data = &platform_labels,
|
|
|
|
.maxlen = sizeof(int),
|
|
|
|
.mode = table->mode,
|
proc/sysctl: add shared variables for range check
In the sysctl code the proc_dointvec_minmax() function is often used to
validate the user supplied value between an allowed range. This
function uses the extra1 and extra2 members from struct ctl_table as
minimum and maximum allowed value.
On sysctl handler declaration, in every source file there are some
readonly variables containing just an integer which address is assigned
to the extra1 and extra2 members, so the sysctl range is enforced.
The special values 0, 1 and INT_MAX are very often used as range
boundary, leading duplication of variables like zero=0, one=1,
int_max=INT_MAX in different source files:
$ git grep -E '\.extra[12].*&(zero|one|int_max)' |wc -l
248
Add a const int array containing the most commonly used values, some
macros to refer more easily to the correct array member, and use them
instead of creating a local one for every object file.
This is the bloat-o-meter output comparing the old and new binary
compiled with the default Fedora config:
# scripts/bloat-o-meter -d vmlinux.o.old vmlinux.o
add/remove: 2/2 grow/shrink: 0/2 up/down: 24/-188 (-164)
Data old new delta
sysctl_vals - 12 +12
__kstrtab_sysctl_vals - 12 +12
max 14 10 -4
int_max 16 - -16
one 68 - -68
zero 128 28 -100
Total: Before=20583249, After=20583085, chg -0.00%
[mcroce@redhat.com: tipc: remove two unused variables]
Link: http://lkml.kernel.org/r/20190530091952.4108-1-mcroce@redhat.com
[akpm@linux-foundation.org: fix net/ipv6/sysctl_net_ipv6.c]
[arnd@arndb.de: proc/sysctl: make firmware loader table conditional]
Link: http://lkml.kernel.org/r/20190617130014.1713870-1-arnd@arndb.de
[akpm@linux-foundation.org: fix fs/eventpoll.c]
Link: http://lkml.kernel.org/r/20190430180111.10688-1-mcroce@redhat.com
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2019-07-18 22:58:50 +00:00
|
|
|
.extra1 = SYSCTL_ZERO,
|
2015-03-04 01:11:20 +00:00
|
|
|
.extra2 = &label_limit,
|
|
|
|
};
|
|
|
|
|
|
|
|
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
|
|
|
|
|
|
|
|
if (write && ret == 0)
|
|
|
|
ret = resize_platform_label_table(net, platform_labels);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
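
The handler above range-checks the new value against [0, label_limit] and then calls resize_platform_label_table() to grow or shrink the per-netns table. A minimal usage sketch, assuming procfs is mounted at the usual /proc path; it is equivalent to `sysctl -w net.mpls.platform_labels=1024`.

```
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/net/mpls/platform_labels";
	const char *val = "1024\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* Writing the sysctl triggers mpls_platform_labels() in the kernel. */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```
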
|
|
|
|
|
2017-03-10 20:43:24 +00:00
|
|
|
#define MPLS_NS_SYSCTL_OFFSET(field) \
|
|
|
|
(&((struct net *)0)->field)
|
|
|
|
|
2015-04-22 10:14:38 +00:00
|
|
|
static const struct ctl_table mpls_table[] = {
|
2015-03-04 01:11:20 +00:00
|
|
|
{
|
|
|
|
.procname = "platform_labels",
|
|
|
|
.data = NULL,
|
|
|
|
.maxlen = sizeof(int),
|
|
|
|
.mode = 0644,
|
|
|
|
.proc_handler = mpls_platform_labels,
|
|
|
|
},
|
2017-03-10 20:43:24 +00:00
|
|
|
{
|
|
|
|
.procname = "ip_ttl_propagate",
|
|
|
|
.data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
|
|
|
|
.maxlen = sizeof(int),
|
|
|
|
.mode = 0644,
|
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2019-07-18 22:58:50 +00:00
|
|
|
.extra1 = SYSCTL_ZERO,
|
|
|
|
.extra2 = SYSCTL_ONE,
|
2017-03-10 20:43:24 +00:00
|
|
|
},
|
2017-03-10 20:43:25 +00:00
|
|
|
{
|
|
|
|
.procname = "default_ttl",
|
|
|
|
.data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
|
|
|
|
.maxlen = sizeof(int),
|
|
|
|
.mode = 0644,
|
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2019-07-18 22:58:50 +00:00
|
|
|
.extra1 = SYSCTL_ONE,
|
2017-03-10 20:43:25 +00:00
|
|
|
.extra2 = &ttl_max,
|
|
|
|
},
|
2015-03-04 01:11:20 +00:00
|
|
|
};
|
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
static int mpls_net_init(struct net *net)
|
|
|
|
{
|
2024-05-01 09:29:25 +00:00
|
|
|
size_t table_size = ARRAY_SIZE(mpls_table);
|
2015-03-04 01:11:20 +00:00
|
|
|
struct ctl_table *table;
|
2017-03-10 20:43:24 +00:00
|
|
|
int i;
|
2015-03-04 01:11:20 +00:00
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
net->mpls.platform_labels = 0;
|
|
|
|
net->mpls.platform_label = NULL;
|
2017-03-10 20:43:24 +00:00
|
|
|
net->mpls.ip_ttl_propagate = 1;
|
2017-03-10 20:43:25 +00:00
|
|
|
net->mpls.default_ttl = 255;
|
2015-03-04 01:10:47 +00:00
|
|
|
|
2015-03-04 01:11:20 +00:00
|
|
|
table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
|
|
|
|
if (table == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-03-10 20:43:24 +00:00
|
|
|
/* Table data contains only offsets relative to the base of
|
|
|
|
* the per-netns struct net at this point, so make them absolute.
|
|
|
|
*/
|
2024-05-01 09:29:25 +00:00
|
|
|
for (i = 0; i < table_size; i++)
|
2017-03-10 20:43:24 +00:00
|
|
|
table[i].data = (char *)net + (uintptr_t)table[i].data;
|
|
|
|
|
2023-08-09 10:50:03 +00:00
|
|
|
net->mpls.ctl = register_net_sysctl_sz(net, "net/mpls", table,
|
2024-05-01 09:29:25 +00:00
|
|
|
table_size);
|
2015-08-31 17:44:19 +00:00
|
|
|
if (net->mpls.ctl == NULL) {
|
|
|
|
kfree(table);
|
2015-03-04 01:11:20 +00:00
|
|
|
return -ENOMEM;
|
2015-08-31 17:44:19 +00:00
|
|
|
}
|
2015-03-04 01:11:20 +00:00
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mpls_net_exit(struct net *net)
|
|
|
|
{
|
2015-03-07 22:21:56 +00:00
|
|
|
struct mpls_route __rcu **platform_label;
|
|
|
|
size_t platform_labels;
|
2024-04-18 09:40:08 +00:00
|
|
|
const struct ctl_table *table;
|
2015-03-04 01:10:47 +00:00
|
|
|
unsigned int index;
|
|
|
|
|
2015-03-04 01:11:20 +00:00
|
|
|
table = net->mpls.ctl->ctl_table_arg;
|
|
|
|
unregister_net_sysctl_table(net->mpls.ctl);
|
|
|
|
kfree(table);
|
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
/* An rcu grace period has passed since there was a device in
|
|
|
|
* the network namespace (and thus the last in flight packet)
|
2015-03-04 01:10:47 +00:00
|
|
|
* left this network namespace. This is because
|
|
|
|
* unregister_netdevice_many and netdev_run_todo has completed
|
|
|
|
* for each network device that was in this network namespace.
|
|
|
|
*
|
|
|
|
* As such no additional rcu synchronization is necessary when
|
|
|
|
* freeing the platform_label table.
|
|
|
|
*/
|
|
|
|
rtnl_lock();
|
2015-03-07 22:21:56 +00:00
|
|
|
platform_label = rtnl_dereference(net->mpls.platform_label);
|
|
|
|
platform_labels = net->mpls.platform_labels;
|
|
|
|
for (index = 0; index < platform_labels; index++) {
|
|
|
|
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
|
|
|
|
RCU_INIT_POINTER(platform_label[index], NULL);
|
2017-03-10 17:46:15 +00:00
|
|
|
mpls_notify_route(net, index, rt, NULL, NULL);
|
2015-03-04 01:10:47 +00:00
|
|
|
mpls_rt_free(rt);
|
|
|
|
}
|
|
|
|
rtnl_unlock();
|
|
|
|
|
2015-03-07 22:21:56 +00:00
|
|
|
kvfree(platform_label);
|
2015-03-04 01:10:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct pernet_operations mpls_net_ops = {
|
|
|
|
.init = mpls_net_init,
|
|
|
|
.exit = mpls_net_exit,
|
|
|
|
};
|
|
|
|
|
2017-01-16 14:16:37 +00:00
|
|
|
static struct rtnl_af_ops mpls_af_ops __read_mostly = {
|
|
|
|
.family = AF_MPLS,
|
|
|
|
.fill_stats_af = mpls_fill_stats_af,
|
|
|
|
.get_stats_af_size = mpls_get_stats_af_size,
|
|
|
|
};
|
|
|
|
|
2024-10-08 18:47:36 +00:00
|
|
|
static const struct rtnl_msg_handler mpls_rtnl_msg_handlers[] __initdata_or_module = {
|
|
|
|
{THIS_MODULE, PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0},
|
|
|
|
{THIS_MODULE, PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0},
|
|
|
|
{THIS_MODULE, PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, 0},
|
|
|
|
{THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
|
|
|
|
mpls_netconf_get_devconf, mpls_netconf_dump_devconf,
|
|
|
|
RTNL_FLAG_DUMP_UNLOCKED},
|
|
|
|
};
|
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
static int __init mpls_init(void)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
|
|
|
|
|
|
|
|
err = register_pernet_subsys(&mpls_net_ops);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
err = register_netdevice_notifier(&mpls_dev_notifier);
|
|
|
|
if (err)
|
|
|
|
goto out_unregister_pernet;
|
|
|
|
|
|
|
|
dev_add_pack(&mpls_packet_type);
|
|
|
|
|
2024-10-16 18:53:56 +00:00
|
|
|
err = rtnl_af_register(&mpls_af_ops);
|
|
|
|
if (err)
|
|
|
|
goto out_unregister_dev_type;
|
2017-01-16 14:16:37 +00:00
|
|
|
|
2024-10-08 18:47:36 +00:00
|
|
|
err = rtnl_register_many(mpls_rtnl_msg_handlers);
|
2017-10-04 17:35:57 +00:00
|
|
|
if (err)
|
2024-10-08 18:47:36 +00:00
|
|
|
goto out_unregister_rtnl_af;
|
|
|
|
|
|
|
|
err = ipgre_tunnel_encap_add_mpls_ops();
|
|
|
|
if (err) {
|
2017-10-04 17:35:57 +00:00
|
|
|
pr_err("Can't add mpls over gre tunnel ops\n");
|
2024-10-08 18:47:36 +00:00
|
|
|
goto out_unregister_rtnl;
|
|
|
|
}
|
2017-10-04 17:35:57 +00:00
|
|
|
|
2015-03-04 01:10:47 +00:00
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
|
2024-10-08 18:47:36 +00:00
|
|
|
out_unregister_rtnl:
|
|
|
|
rtnl_unregister_many(mpls_rtnl_msg_handlers);
|
|
|
|
out_unregister_rtnl_af:
|
|
|
|
rtnl_af_unregister(&mpls_af_ops);
|
2024-10-16 18:53:56 +00:00
|
|
|
out_unregister_dev_type:
|
2024-10-08 18:47:36 +00:00
|
|
|
dev_remove_pack(&mpls_packet_type);
|
2015-03-04 01:10:47 +00:00
|
|
|
out_unregister_pernet:
|
|
|
|
unregister_pernet_subsys(&mpls_net_ops);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
module_init(mpls_init);
|
|
|
|
|
|
|
|
static void __exit mpls_exit(void)
|
|
|
|
{
|
2015-03-04 01:13:56 +00:00
|
|
|
rtnl_unregister_all(PF_MPLS);
|
2017-01-16 14:16:37 +00:00
|
|
|
rtnl_af_unregister(&mpls_af_ops);
|
2015-03-04 01:10:47 +00:00
|
|
|
dev_remove_pack(&mpls_packet_type);
|
|
|
|
unregister_netdevice_notifier(&mpls_dev_notifier);
|
|
|
|
unregister_pernet_subsys(&mpls_net_ops);
|
2017-10-04 17:35:57 +00:00
|
|
|
ipgre_tunnel_encap_del_mpls_ops();
|
2015-03-04 01:10:47 +00:00
|
|
|
}
|
|
|
|
module_exit(mpls_exit);
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION("MultiProtocol Label Switching");
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
|
|
MODULE_ALIAS_NETPROTO(PF_MPLS);
|