mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-11 16:29:05 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix bogus EEXIST on element insertions to the rbtree with timeouts,
   from Stefano Brivio.

2) Preempt BUG splat in the pipapo element insertion path, also from
   Stefano.

3) Release filter from the ctnetlink error path.

4) Release flowtable hooks from the deletion path.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 38af8f2d60
net/netfilter/nf_conntrack_netlink.c

@@ -939,7 +939,8 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
                         filter->mark.mask = 0xffffffff;
                 }
         } else if (cda[CTA_MARK_MASK]) {
-                return ERR_PTR(-EINVAL);
+                err = -EINVAL;
+                goto err_filter;
         }
 #endif
         if (!cda[CTA_FILTER])
@@ -947,15 +948,17 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)

         err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
         if (err < 0)
-                return ERR_PTR(err);
+                goto err_filter;

         err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
         if (err < 0)
-                return ERR_PTR(err);
+                goto err_filter;

         if (filter->orig_flags) {
-                if (!cda[CTA_TUPLE_ORIG])
-                        return ERR_PTR(-EINVAL);
+                if (!cda[CTA_TUPLE_ORIG]) {
+                        err = -EINVAL;
+                        goto err_filter;
+                }

                 err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
                                                    CTA_TUPLE_ORIG,
@@ -963,23 +966,32 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
                                                    &filter->zone,
                                                    filter->orig_flags);
                 if (err < 0)
-                        return ERR_PTR(err);
+                        goto err_filter;
         }

         if (filter->reply_flags) {
-                if (!cda[CTA_TUPLE_REPLY])
-                        return ERR_PTR(-EINVAL);
+                if (!cda[CTA_TUPLE_REPLY]) {
+                        err = -EINVAL;
+                        goto err_filter;
+                }

                 err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
                                                    CTA_TUPLE_REPLY,
                                                    filter->family,
                                                    &filter->zone,
                                                    filter->orig_flags);
-                if (err < 0)
-                        return ERR_PTR(err);
+                if (err < 0) {
+                        err = -EINVAL;
+                        goto err_filter;
+                }
         }

         return filter;
+
+err_filter:
+        kfree(filter);
+
+        return ERR_PTR(err);
 }

 static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
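The ctnetlink change above turns every early return ERR_PTR(...) in ctnetlink_alloc_filter() into a goto err_filter, so the filter allocated at the top of the function is freed on each failure path instead of leaking. A minimal userspace sketch of that single-exit cleanup pattern follows; the demo_filter struct and parse_mark() helper are made up for illustration and are not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the filter being built from netlink attributes. */
struct demo_filter {
        unsigned int mark;
        unsigned int mask;
};

/* Hypothetical parser: fails with -EINVAL when the attribute is missing. */
static int parse_mark(const char *arg, unsigned int *out)
{
        if (!arg)
                return -EINVAL;
        *out = (unsigned int)strtoul(arg, NULL, 0);
        return 0;
}

/* Allocate a filter; on any parse error, free it before returning NULL. */
static struct demo_filter *alloc_filter(const char *mark, const char *mask,
                                        int *errp)
{
        struct demo_filter *filter;
        int err;

        filter = calloc(1, sizeof(*filter));
        if (!filter) {
                *errp = -ENOMEM;
                return NULL;
        }

        err = parse_mark(mark, &filter->mark);
        if (err < 0)
                goto err_filter;        /* a plain "return" here would leak filter */

        err = parse_mark(mask, &filter->mask);
        if (err < 0)
                goto err_filter;

        return filter;

err_filter:
        free(filter);                   /* single cleanup point, like kfree(filter) */
        *errp = err;
        return NULL;
}

int main(void)
{
        int err = 0;
        struct demo_filter *f = alloc_filter("0x1", NULL, &err);

        if (!f)
                printf("alloc_filter failed: %d\n", err);
        else
                free(f);
        return 0;
}

The same shape appears in the patch: one err_filter label, one kfree(filter), and every error path funnels through it.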
net/netfilter/nf_tables_api.c

@@ -6550,12 +6550,22 @@ err1:
         return err;
 }

+static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook)
+{
+        struct nft_hook *this, *next;
+
+        list_for_each_entry_safe(this, next, &flowtable_hook->list, list) {
+                list_del(&this->list);
+                kfree(this);
+        }
+}
+
 static int nft_delflowtable_hook(struct nft_ctx *ctx,
                                  struct nft_flowtable *flowtable)
 {
         const struct nlattr * const *nla = ctx->nla;
         struct nft_flowtable_hook flowtable_hook;
-        struct nft_hook *this, *next, *hook;
+        struct nft_hook *this, *hook;
         struct nft_trans *trans;
         int err;

@@ -6564,33 +6574,40 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
         if (err < 0)
                 return err;

-        list_for_each_entry_safe(this, next, &flowtable_hook.list, list) {
+        list_for_each_entry(this, &flowtable_hook.list, list) {
                 hook = nft_hook_list_find(&flowtable->hook_list, this);
                 if (!hook) {
                         err = -ENOENT;
                         goto err_flowtable_del_hook;
                 }
                 hook->inactive = true;
-                list_del(&this->list);
-                kfree(this);
         }

         trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
                                 sizeof(struct nft_trans_flowtable));
-        if (!trans)
-                return -ENOMEM;
+        if (!trans) {
+                err = -ENOMEM;
+                goto err_flowtable_del_hook;
+        }

         nft_trans_flowtable(trans) = flowtable;
         nft_trans_flowtable_update(trans) = true;
         INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+        nft_flowtable_hook_release(&flowtable_hook);

         list_add_tail(&trans->list, &ctx->net->nft.commit_list);

         return 0;

 err_flowtable_del_hook:
-        list_for_each_entry(hook, &flowtable_hook.list, list)
+        list_for_each_entry(this, &flowtable_hook.list, list) {
+                hook = nft_hook_list_find(&flowtable->hook_list, this);
+                if (!hook)
+                        break;
+
                 hook->inactive = false;
+        }
+        nft_flowtable_hook_release(&flowtable_hook);

         return err;
 }
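The nf_tables hunks add nft_flowtable_hook_release() so the temporary hook list parsed earlier in nft_delflowtable_hook() is freed both on success and on the new error path, which also rolls the inactive flags back. The sketch below shows the same "release a temporary list, saving the successor before freeing each node" idea with a plain singly linked list; the demo_hook type and function names are invented for the example.

#include <stdlib.h>

/* Hypothetical temporary hook list node (stand-in for struct nft_hook). */
struct demo_hook {
        struct demo_hook *next;
        int ifindex;
};

/*
 * Free every node of the temporary list.  The successor is read before
 * free(), mirroring what list_for_each_entry_safe() guarantees in the
 * patch: the iterator never dereferences a node that was just released.
 */
void demo_hook_list_release(struct demo_hook **head)
{
        struct demo_hook *this = *head;
        struct demo_hook *next;

        while (this) {
                next = this->next;      /* remember the successor first */
                free(this);             /* then release the current node */
                this = next;
        }
        *head = NULL;                   /* list is now empty */
}

In the patch the helper runs on both the success path and the error path of nft_delflowtable_hook(), so the temporary list never outlives the function.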
net/netfilter/nft_set_pipapo.c

@@ -1242,7 +1242,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
                 end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
         }

-        if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+        if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+                put_cpu_ptr(m->scratch);
+
                 err = pipapo_realloc_scratch(m, bsize_max);
                 if (err)
                         return err;
@@ -1250,6 +1252,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
                 this_cpu_write(nft_pipapo_scratch_index, false);

                 m->bsize_max = bsize_max;
+        } else {
+                put_cpu_ptr(m->scratch);
         }

         *ext2 = &e->ext;
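In the pipapo hunks, this_cpu_ptr() becomes get_cpu_ptr(), which disables preemption while the per-CPU scratch map is inspected, and a matching put_cpu_ptr() is added on both branches so every exit from the block drops the reference it took. Below is a rough userspace analogue of that "balanced acquire/release on every path" shape, using a mutex in place of preemption control; all demo_* names are hypothetical.

#include <pthread.h>
#include <stddef.h>

/* Stand-ins for the per-CPU scratch state the patch protects. */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static void *demo_scratch;
static size_t demo_bsize_max;

/* Hypothetical reallocation step, analogous to pipapo_realloc_scratch(). */
static int demo_realloc_scratch(size_t bsize_max)
{
        (void)bsize_max;        /* allocation elided in this sketch */
        return 0;
}

int demo_check_scratch(size_t bsize_max)
{
        /* "get": enter the protected section, like get_cpu_ptr(). */
        pthread_mutex_lock(&demo_lock);

        if (!demo_scratch || bsize_max > demo_bsize_max) {
                /*
                 * Drop the protection before a call that may block, just as
                 * the patch calls put_cpu_ptr() before the reallocation.
                 */
                pthread_mutex_unlock(&demo_lock);
                return demo_realloc_scratch(bsize_max);
        }

        /* "put" on the fast path too: every exit is balanced. */
        pthread_mutex_unlock(&demo_lock);
        return 0;
}

The analogy is loose: get_cpu_ptr()/put_cpu_ptr() disable and re-enable preemption rather than take a lock, but the structural point is the same.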
net/netfilter/nft_set_rbtree.c

@@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,

                         if (nft_rbtree_interval_start(new)) {
                                 if (nft_rbtree_interval_end(rbe) &&
-                                    nft_set_elem_active(&rbe->ext, genmask))
+                                    nft_set_elem_active(&rbe->ext, genmask) &&
+                                    !nft_set_elem_expired(&rbe->ext))
                                         overlap = false;
                         } else {
                                 overlap = nft_rbtree_interval_end(rbe) &&
                                           nft_set_elem_active(&rbe->ext,
-                                                              genmask);
+                                                              genmask) &&
+                                          !nft_set_elem_expired(&rbe->ext);
                         }
                 } else if (d > 0) {
                         p = &parent->rb_right;
@@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                         if (nft_rbtree_interval_end(new)) {
                                 overlap = nft_rbtree_interval_end(rbe) &&
                                           nft_set_elem_active(&rbe->ext,
-                                                              genmask);
+                                                              genmask) &&
+                                          !nft_set_elem_expired(&rbe->ext);
                         } else if (nft_rbtree_interval_end(rbe) &&
-                                   nft_set_elem_active(&rbe->ext, genmask)) {
+                                   nft_set_elem_active(&rbe->ext, genmask) &&
+                                   !nft_set_elem_expired(&rbe->ext)) {
                                 overlap = true;
                         }
                 } else {
@@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                             nft_rbtree_interval_start(new)) {
                                 p = &parent->rb_left;

-                                if (nft_set_elem_active(&rbe->ext, genmask))
+                                if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                    !nft_set_elem_expired(&rbe->ext))
                                         overlap = false;
                         } else if (nft_rbtree_interval_start(rbe) &&
                                    nft_rbtree_interval_end(new)) {
                                 p = &parent->rb_right;

-                                if (nft_set_elem_active(&rbe->ext, genmask))
+                                if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                    !nft_set_elem_expired(&rbe->ext))
                                         overlap = false;
-                        } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+                        } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                   !nft_set_elem_expired(&rbe->ext)) {
                                 *ext = &rbe->ext;
                                 return -EEXIST;
                         } else {
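The rbtree hunks add !nft_set_elem_expired() to every overlap and EEXIST check in __nft_rbtree_insert(), so an element whose timeout has already run out (but which has not been garbage-collected yet) no longer blocks a new insertion. The fragment below restates that predicate in isolation; struct demo_elem and the helpers are illustrative only, not the kernel's nft_set_ext accessors.

#include <stdbool.h>
#include <time.h>

/* Hypothetical element state (stand-in for the nft_set_ext checks). */
struct demo_elem {
        bool active;            /* visible in the current generation */
        time_t expires;         /* absolute expiry time, 0 = no timeout */
};

bool demo_elem_expired(const struct demo_elem *e)
{
        return e->expires != 0 && e->expires <= time(NULL);
}

/*
 * An existing element only conflicts with an insertion if it is both
 * active in the current generation and not yet expired -- the second
 * condition is what the patch adds to each check.
 */
bool demo_elem_conflicts(const struct demo_elem *e)
{
        return e->active && !demo_elem_expired(e);
}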