mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
13cfd6a6d7
When metadata_dst struct is allocated (using metadata_dst_alloc()), it
reserves room for options at the end of the struct.

Change the memcpy() to unsafe_memcpy() as it is guaranteed that enough
room (md_size bytes) was allocated and the field-spanning write is
intentional.

This resolves the following warning:

------------[ cut here ]------------
memcpy: detected field-spanning write (size 104) of single field "&new_md->u.tun_info" at include/net/dst_metadata.h:166 (size 96)
WARNING: CPU: 2 PID: 391470 at include/net/dst_metadata.h:166 tun_dst_unclone+0x114/0x138 [geneve]
Modules linked in: act_tunnel_key geneve ip6_udp_tunnel udp_tunnel act_vlan act_mirred act_skbedit cls_matchall nfnetlink_cttimeout act_gact cls_flower sch_ingress sbsa_gwdt ipmi_devintf ipmi_msghandler xfrm_interface xfrm6_tunnel tunnel6 tunnel4 xfrm_user xfrm_algo nvme_fabrics overlay optee openvswitch nsh nf_conncount ib_srp scsi_transport_srp rpcrdma rdma_ucm ib_iser rdma_cm ib_umad iw_cm libiscsi ib_ipoib scsi_transport_iscsi ib_cm uio_pdrv_genirq uio mlxbf_pmc pwr_mlxbf mlxbf_bootctl bluefield_edac nft_chain_nat binfmt_misc xt_MASQUERADE nf_nat xt_tcpmss xt_NFLOG nfnetlink_log xt_recent xt_hashlimit xt_state xt_conntrack nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 xt_mark xt_comment ipt_REJECT nf_reject_ipv4 nft_compat nf_tables nfnetlink sch_fq_codel dm_multipath fuse efi_pstore ip_tables btrfs blake2b_generic raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor xor_neon raid6_pq raid1 raid0 nvme nvme_core mlx5_ib ib_uverbs ib_core ipv6 crc_ccitt mlx5_core crct10dif_ce mlxfw psample i2c_mlxbf gpio_mlxbf2 mlxbf_gige mlxbf_tmfifo
CPU: 2 PID: 391470 Comm: handler6 Not tainted 6.10.0-rc1 #1
Hardware name: https://www.mellanox.com BlueField SoC/BlueField SoC, BIOS 4.5.0.12993 Dec 6 2023
pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : tun_dst_unclone+0x114/0x138 [geneve]
lr : tun_dst_unclone+0x114/0x138 [geneve]
sp : ffffffc0804533f0
x29: ffffffc0804533f0 x28: 000000000000024e x27: 0000000000000000
x26: ffffffdcfc0e8e40 x25: ffffff8086fa6600 x24: ffffff8096a0c000
x23: 0000000000000068 x22: 0000000000000008 x21: ffffff8092ad7000
x20: ffffff8081e17900 x19: ffffff8092ad7900 x18: 00000000fffffffd
x17: 0000000000000000 x16: ffffffdcfa018488 x15: 695f6e75742e753e
x14: 2d646d5f77656e26 x13: 6d5f77656e262220 x12: 646c65696620656c
x11: ffffffdcfbe33ae8 x10: ffffffdcfbe1baa8 x9 : ffffffdcfa0a4c10
x8 : 0000000000017fe8 x7 : c0000000ffffefff x6 : 0000000000000001
x5 : ffffff83fdeeb010 x4 : 0000000000000000 x3 : 0000000000000027
x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffffff80913f6780
Call trace:
 tun_dst_unclone+0x114/0x138 [geneve]
 geneve_xmit+0x214/0x10e0 [geneve]
 dev_hard_start_xmit+0xc0/0x220
 __dev_queue_xmit+0xa14/0xd38
 dev_queue_xmit+0x14/0x28 [openvswitch]
 ovs_vport_send+0x98/0x1c8 [openvswitch]
 do_output+0x80/0x1a0 [openvswitch]
 do_execute_actions+0x172c/0x1958 [openvswitch]
 ovs_execute_actions+0x64/0x1a8 [openvswitch]
 ovs_packet_cmd_execute+0x258/0x2d8 [openvswitch]
 genl_family_rcv_msg_doit+0xc8/0x138
 genl_rcv_msg+0x1ec/0x280
 netlink_rcv_skb+0x64/0x150
 genl_rcv+0x40/0x60
 netlink_unicast+0x2e4/0x348
 netlink_sendmsg+0x1b0/0x400
 __sock_sendmsg+0x64/0xc0
 ____sys_sendmsg+0x284/0x308
 ___sys_sendmsg+0x88/0xf0
 __sys_sendmsg+0x70/0xd8
 __arm64_sys_sendmsg+0x2c/0x40
 invoke_syscall+0x50/0x128
 el0_svc_common.constprop.0+0x48/0xf0
 do_el0_svc+0x24/0x38
 el0_svc+0x38/0x100
 el0t_64_sync_handler+0xc0/0xc8
 el0t_64_sync+0x1a4/0x1a8
---[ end trace 0000000000000000 ]---

Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Gal Pressman <gal@nvidia.com>
Link: https://patch.msgid.link/20240818114351.3612692-1-gal@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
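For reference, a minimal before/after sketch of the change described above. The "before" line is reconstructed from the warning (tun_info is the 96-byte field, the 104-byte copy includes 8 bytes of options); the "after" form is the call that now appears in tun_dst_unclone() in the header below:

	/* Before: the copy runs past the tun_info field and trips the
	 * field-spanning write check.
	 */
	memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
	       sizeof(struct ip_tunnel_info) + md_size);

	/* After: the spanning write is annotated as intentional, since
	 * metadata_dst_alloc() reserved md_size bytes of option space
	 * directly behind tun_info.
	 */
	unsafe_memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
		      sizeof(struct ip_tunnel_info) + md_size,
		      /* metadata_dst_alloc() reserves room (md_size bytes) for
		       * options right after the ip_tunnel_info struct.
		       */);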
277 lines
6.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_DST_METADATA_H
#define __NET_DST_METADATA_H 1

#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
#include <net/macsec.h>
#include <net/dst.h>

enum metadata_type {
	METADATA_IP_TUNNEL,
	METADATA_HW_PORT_MUX,
	METADATA_MACSEC,
	METADATA_XFRM,
};

struct hw_port_info {
	struct net_device *lower_dev;
	u32 port_id;
};

struct macsec_info {
	sci_t sci;
};

struct xfrm_md_info {
	u32 if_id;
	int link;
	struct dst_entry *dst_orig;
};

struct metadata_dst {
	struct dst_entry dst;
	enum metadata_type type;
	union {
		struct ip_tunnel_info tun_info;
		struct hw_port_info port_info;
		struct macsec_info macsec_info;
		struct xfrm_md_info xfrm_info;
	} u;
};

static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);

	if (md_dst && md_dst->dst.flags & DST_METADATA)
		return md_dst;

	return NULL;
}

static inline struct ip_tunnel_info *
skb_tunnel_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
		return &md_dst->u.tun_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
	     dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
		return lwt_tun_info(dst->lwtstate);

	return NULL;
}

static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
{
	return (struct xfrm_md_info *)lwt->data;
}

static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_XFRM)
		return &md_dst->u.xfrm_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
		return lwt_xfrm_info(dst->lwtstate);

	return NULL;
}

static inline bool skb_valid_dst(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return dst && !(dst->flags & DST_METADATA);
}

static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
				       const struct sk_buff *skb_b)
{
	const struct metadata_dst *a, *b;

	if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
		return 0;

	a = (const struct metadata_dst *) skb_dst(skb_a);
	b = (const struct metadata_dst *) skb_dst(skb_b);

	if (!a != !b || a->type != b->type)
		return 1;

	switch (a->type) {
	case METADATA_HW_PORT_MUX:
		return memcmp(&a->u.port_info, &b->u.port_info,
			      sizeof(a->u.port_info));
	case METADATA_IP_TUNNEL:
		return memcmp(&a->u.tun_info, &b->u.tun_info,
			      sizeof(a->u.tun_info) +
			      a->u.tun_info.options_len);
	case METADATA_MACSEC:
		return memcmp(&a->u.macsec_info, &b->u.macsec_info,
			      sizeof(a->u.macsec_info));
	case METADATA_XFRM:
		return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
			      sizeof(a->u.xfrm_info));
	default:
		return 1;
	}
}

void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags);
void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);

static inline struct metadata_dst *tun_rx_dst(int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!tun_dst)
		return NULL;

	tun_dst->u.tun_info.options_len = 0;
	tun_dst->u.tun_info.mode = 0;
	return tun_dst;
}

static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	int md_size;
	struct metadata_dst *new_md;

	if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
		return ERR_PTR(-EINVAL);

	md_size = md_dst->u.tun_info.options_len;
	new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!new_md)
		return ERR_PTR(-ENOMEM);

	unsafe_memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
		      sizeof(struct ip_tunnel_info) + md_size,
		      /* metadata_dst_alloc() reserves room (md_size bytes) for
		       * options right after the ip_tunnel_info struct.
		       */);
#ifdef CONFIG_DST_CACHE
	/* Unclone the dst cache if there is one */
	if (new_md->u.tun_info.dst_cache.cache) {
		int ret;

		ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
		if (ret) {
			metadata_dst_free(new_md);
			return ERR_PTR(ret);
		}
	}
#endif

	skb_dst_drop(skb);
	skb_dst_set(skb, &new_md->dst);
	return new_md;
}

static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
{
	struct metadata_dst *dst;

	dst = tun_dst_unclone(skb);
	if (IS_ERR(dst))
		return NULL;

	return &dst->u.tun_info;
}

static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
						    __be32 daddr,
						    __u8 tos, __u8 ttl,
						    __be16 tp_dst,
						    const unsigned long *flags,
						    __be64 tunnel_id,
						    int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   saddr, daddr, tos, ttl,
			   0, 0, tp_dst, tunnel_id, flags);
	return tun_dst;
}

static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
						 const unsigned long *flags,
						 __be64 tunnel_id,
						 int md_size)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
				0, flags, tunnel_id, md_size);
}

static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
						      const struct in6_addr *daddr,
						      __u8 tos, __u8 ttl,
						      __be16 tp_dst,
						      __be32 label,
						      const unsigned long *flags,
						      __be64 tunnel_id,
						      int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->mode = IP_TUNNEL_INFO_IPV6;
	ip_tunnel_flags_copy(info->key.tun_flags, flags);
	info->key.tun_id = tunnel_id;
	info->key.tp_src = 0;
	info->key.tp_dst = tp_dst;

	info->key.u.ipv6.src = *saddr;
	info->key.u.ipv6.dst = *daddr;

	info->key.tos = tos;
	info->key.ttl = ttl;
	info->key.label = label;

	return tun_dst;
}

static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
						   const unsigned long *flags,
						   __be64 tunnel_id,
						   int md_size)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);

	return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
				  ipv6_get_dsfield(ip6h), ip6h->hop_limit,
				  0, ip6_flowlabel(ip6h), flags, tunnel_id,
				  md_size);
}

#endif /* __NET_DST_METADATA_H */
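As a usage note, here is a minimal, hypothetical sketch of how a tunnel driver's receive path might use the helpers above to attach IPv4 tunnel metadata to an skb, in the spirit of what drivers such as geneve do. The function name and the tun_id/opts_len parameters are placeholders, not part of this header, and error handling is reduced to the essentials:

	/* Hypothetical example: build a METADATA_IP_TUNNEL dst from the outer
	 * IPv4 header and attach it to the skb. ip_tun_rx_dst() reserves
	 * opts_len bytes of option space behind tun_info, which is what makes
	 * the field-spanning copy in tun_dst_unclone() safe later on.
	 */
	static int example_tunnel_rx(struct sk_buff *skb, __be64 tun_id, int opts_len)
	{
		IP_TUNNEL_DECLARE_FLAGS(flags) = { };
		struct metadata_dst *tun_dst;

		__set_bit(IP_TUNNEL_KEY_BIT, flags);

		tun_dst = ip_tun_rx_dst(skb, flags, tun_id, opts_len);
		if (!tun_dst)
			return -ENOMEM;

		skb_dst_set(skb, &tun_dst->dst);
		return 0;
	}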