mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-04 04:02:26 +00:00
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
Steffen Klassert says: ==================== ipsec 2022-11-23 1) Fix "disable_policy" on ipv4 early demux. Packets after the initial packet in a flow might be incorrectly dropped on early demux if there are no matching policies. From Eyal Birger. 2) Fix a kernel warning in case XFRM encap type is not available. From Eyal Birger. 3) Fix ESN wrap around for GSO to avoid a double usage of a sequence number. From Christian Langrock. 4) Fix a send_acquire race with pfkey_register. From Herbert Xu. 5) Fix a list corruption panic in __xfrm_state_delete(). From Thomas Jarosch. 6) Fix an unchecked return value in xfrm6_init(). From Chen Zhongjin. * 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec: xfrm: Fix ignored return value in xfrm6_init() xfrm: Fix oops in __xfrm_state_delete() af_key: Fix send_acquire race with pfkey_register xfrm: replay: Fix ESN wrap around for GSO xfrm: lwtunnel: squelch kernel warning in case XFRM encap type is not available xfrm: fix "disable_policy" on ipv4 early demux ==================== Link: https://lore.kernel.org/r/20221123093117.434274-1-steffen.klassert@secunet.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
06ccc8ec70
@ -48,9 +48,11 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
|
||||
return "RPL";
|
||||
case LWTUNNEL_ENCAP_IOAM6:
|
||||
return "IOAM6";
|
||||
case LWTUNNEL_ENCAP_XFRM:
|
||||
/* module autoload not supported for encap type */
|
||||
return NULL;
|
||||
case LWTUNNEL_ENCAP_IP6:
|
||||
case LWTUNNEL_ENCAP_IP:
|
||||
case LWTUNNEL_ENCAP_XFRM:
|
||||
case LWTUNNEL_ENCAP_NONE:
|
||||
case __LWTUNNEL_ENCAP_MAX:
|
||||
/* should not have got here */
|
||||
|
@ -314,6 +314,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
|
||||
xo->seq.low += skb_shinfo(skb)->gso_segs;
|
||||
}
|
||||
|
||||
if (xo->seq.low < seq)
|
||||
xo->seq.hi++;
|
||||
|
||||
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
|
||||
|
||||
ip_hdr(skb)->tot_len = htons(skb->len);
|
||||
|
@ -366,6 +366,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
|
||||
iph->tos, dev);
|
||||
if (unlikely(err))
|
||||
goto drop_error;
|
||||
} else {
|
||||
struct in_device *in_dev = __in_dev_get_rcu(dev);
|
||||
|
||||
if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
|
||||
IPCB(skb)->flags |= IPSKB_NOPOLICY;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IP_ROUTE_CLASSID
|
||||
|
@ -346,6 +346,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
|
||||
xo->seq.low += skb_shinfo(skb)->gso_segs;
|
||||
}
|
||||
|
||||
if (xo->seq.low < seq)
|
||||
xo->seq.hi++;
|
||||
|
||||
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
|
||||
|
||||
len = skb->len - sizeof(struct ipv6hdr);
|
||||
|
@ -287,9 +287,13 @@ int __init xfrm6_init(void)
|
||||
if (ret)
|
||||
goto out_state;
|
||||
|
||||
register_pernet_subsys(&xfrm6_net_ops);
|
||||
ret = register_pernet_subsys(&xfrm6_net_ops);
|
||||
if (ret)
|
||||
goto out_protocol;
|
||||
out:
|
||||
return ret;
|
||||
out_protocol:
|
||||
xfrm6_protocol_fini();
|
||||
out_state:
|
||||
xfrm6_state_fini();
|
||||
out_policy:
|
||||
|
@ -2905,7 +2905,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
|
||||
break;
|
||||
if (!aalg->pfkey_supported)
|
||||
continue;
|
||||
if (aalg_tmpl_set(t, aalg) && aalg->available)
|
||||
if (aalg_tmpl_set(t, aalg))
|
||||
sz += sizeof(struct sadb_comb);
|
||||
}
|
||||
return sz + sizeof(struct sadb_prop);
|
||||
@ -2923,7 +2923,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
|
||||
if (!ealg->pfkey_supported)
|
||||
continue;
|
||||
|
||||
if (!(ealg_tmpl_set(t, ealg) && ealg->available))
|
||||
if (!(ealg_tmpl_set(t, ealg)))
|
||||
continue;
|
||||
|
||||
for (k = 1; ; k++) {
|
||||
@ -2934,16 +2934,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
|
||||
if (!aalg->pfkey_supported)
|
||||
continue;
|
||||
|
||||
if (aalg_tmpl_set(t, aalg) && aalg->available)
|
||||
if (aalg_tmpl_set(t, aalg))
|
||||
sz += sizeof(struct sadb_comb);
|
||||
}
|
||||
}
|
||||
return sz + sizeof(struct sadb_prop);
|
||||
}
|
||||
|
||||
static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
{
|
||||
struct sadb_prop *p;
|
||||
int sz = 0;
|
||||
int i;
|
||||
|
||||
p = skb_put(skb, sizeof(struct sadb_prop));
|
||||
@ -2971,13 +2972,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
c->sadb_comb_soft_addtime = 20*60*60;
|
||||
c->sadb_comb_hard_usetime = 8*60*60;
|
||||
c->sadb_comb_soft_usetime = 7*60*60;
|
||||
sz += sizeof(*c);
|
||||
}
|
||||
}
|
||||
|
||||
return sz + sizeof(*p);
|
||||
}
|
||||
|
||||
static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
{
|
||||
struct sadb_prop *p;
|
||||
int sz = 0;
|
||||
int i, k;
|
||||
|
||||
p = skb_put(skb, sizeof(struct sadb_prop));
|
||||
@ -3019,8 +3024,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
|
||||
c->sadb_comb_soft_addtime = 20*60*60;
|
||||
c->sadb_comb_hard_usetime = 8*60*60;
|
||||
c->sadb_comb_soft_usetime = 7*60*60;
|
||||
sz += sizeof(*c);
|
||||
}
|
||||
}
|
||||
|
||||
return sz + sizeof(*p);
|
||||
}
|
||||
|
||||
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
|
||||
@ -3150,6 +3158,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
|
||||
struct sadb_x_sec_ctx *sec_ctx;
|
||||
struct xfrm_sec_ctx *xfrm_ctx;
|
||||
int ctx_size = 0;
|
||||
int alg_size = 0;
|
||||
|
||||
sockaddr_size = pfkey_sockaddr_size(x->props.family);
|
||||
if (!sockaddr_size)
|
||||
@ -3161,16 +3170,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
|
||||
sizeof(struct sadb_x_policy);
|
||||
|
||||
if (x->id.proto == IPPROTO_AH)
|
||||
size += count_ah_combs(t);
|
||||
alg_size = count_ah_combs(t);
|
||||
else if (x->id.proto == IPPROTO_ESP)
|
||||
size += count_esp_combs(t);
|
||||
alg_size = count_esp_combs(t);
|
||||
|
||||
if ((xfrm_ctx = x->security)) {
|
||||
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
|
||||
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
|
||||
}
|
||||
|
||||
skb = alloc_skb(size + 16, GFP_ATOMIC);
|
||||
skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
|
||||
if (skb == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -3224,10 +3233,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
|
||||
pol->sadb_x_policy_priority = xp->priority;
|
||||
|
||||
/* Set sadb_comb's. */
|
||||
alg_size = 0;
|
||||
if (x->id.proto == IPPROTO_AH)
|
||||
dump_ah_combs(skb, t);
|
||||
alg_size = dump_ah_combs(skb, t);
|
||||
else if (x->id.proto == IPPROTO_ESP)
|
||||
dump_esp_combs(skb, t);
|
||||
alg_size = dump_esp_combs(skb, t);
|
||||
|
||||
hdr->sadb_msg_len += alg_size / 8;
|
||||
|
||||
/* security context */
|
||||
if (xfrm_ctx) {
|
||||
@ -3382,7 +3394,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
|
||||
hdr->sadb_msg_len = size / sizeof(uint64_t);
|
||||
hdr->sadb_msg_errno = 0;
|
||||
hdr->sadb_msg_reserved = 0;
|
||||
hdr->sadb_msg_seq = x->km.seq = get_acqseq();
|
||||
hdr->sadb_msg_seq = x->km.seq;
|
||||
hdr->sadb_msg_pid = 0;
|
||||
|
||||
/* SA */
|
||||
|
@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
|
||||
{
|
||||
struct xfrm_offload *xo = xfrm_offload(skb);
|
||||
__u32 seq = xo->seq.low;
|
||||
|
||||
seq += skb_shinfo(skb)->gso_segs;
|
||||
if (unlikely(seq < xo->seq.low))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
|
||||
{
|
||||
int err;
|
||||
@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
|
||||
return skb;
|
||||
}
|
||||
|
||||
if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
|
||||
if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
|
||||
unlikely(xmit_xfrm_check_overflow(skb)))) {
|
||||
struct sk_buff *segs;
|
||||
|
||||
/* Packet got rerouted, fixup features and segment it. */
|
||||
|
@ -714,7 +714,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
|
||||
oseq += skb_shinfo(skb)->gso_segs;
|
||||
}
|
||||
|
||||
if (unlikely(oseq < replay_esn->oseq)) {
|
||||
if (unlikely(xo->seq.low < replay_esn->oseq)) {
|
||||
XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
|
||||
xo->seq.hi = oseq_hi;
|
||||
replay_esn->oseq_hi = oseq_hi;
|
||||
|
Loading…
Reference in New Issue
Block a user