netfilter pull request 24-11-07

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEjF9xRqF1emXiQiqU1w0aZmrPKyEFAmcr+e0ACgkQ1w0aZmrP
 KyFS9RAAoB5S0NZBNf60fpG6b5ZlfDEfoOUXfp91CRT1WY7KaCekH19aLf5O5HAg
 iJNjdOs7R4YCMfUfbWzDj0ZYnayz0h618Nin/EufIJbAMoOnBMnb12r5DUhnMpnC
 M9noDQJzyXPhlE1gR3py7I9VgwdqfRa3+EfS1uTm1NL9tv7MLuej+Z6nnmQ2Sw2/
 tUfuhLyKvs3iIiegeojrsTGix4YnMNrIrQUqpJXq7jCfXHFPz10MmlR7fZuBK99s
 QuphQ9Onf7SXow2bxkdhB2cS4i3+BK5fLFXHDW9cLROL+PKPenAsdnKcsXe5ViL5
 Ck1/N4KxydXt3djmDWfvXFF2/BaxMHnO5S9p+1ZAE18auCz6KKzefjBo134rftgz
 GN8Fu8A2OQ9pZzod/21M0m2BDAoOG3kKRq6MjoUydfc8/T0q/FVtInprjh1twIvg
 3uZludlQUKANVeH3bRR16fE0Z5k8fdTX3BXxRgQNo9hrDjpnmAyJmh6XTCuS9XWJ
 L0VR94QLnN/yi7U7EWdEk2VV944Pfj6aeNjFC8AWHbhP+DzvELFFZvDvQ7AHaiK1
 fOZfuX4nO4i2WrnCHeVLhYxdkvGixyWxorMYcDNmRDSQirQLaARMaAMFfgI4TKkm
 R/wjJKXitntN9cLiiHuParkVQZAiFH1AwuvxQgCI5uaN6Gxpivo=
 =TkS8
 -----END PGP SIGNATURE-----

Merge tag 'nf-next-24-11-07' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following series contains Netfilter updates for net-next:

1) Make legacy xtables configs user selectable, from Breno Leitao.

2) Fix a few sparse warnings related to percpu, from Uros Bizjak.

3) Use strscpy_pad, from Justin Stitt.

4) Use nft_trans_elem_alloc() in catchall flush, from Florian Westphal.

5) A series of 7 patches to fix false positives with CONFIG_RCU_LIST=y.
   Florian also sees a possible issue in patch 10 with module load/removal
   when requesting an expression that is available via a module. As for
   patch 11, the object is being updated, so a reference on the module
   already exists and I don't see any real issue there. (The list-traversal
   pattern these patches apply is sketched right after Florian's note below.)

   Florian says:

   "Unfortunately there are many more errors, and not all are false positives.

   The first patches pass lockdep_commit_lock_is_held() to the rcu list
   traversal macro so that those splats are avoided.

   The last two patches are real code changes, as opposed to
   'pass the transaction mutex to relax the rcu check':

   Those two lists are not protected by the transaction mutex, so they could
   be altered in parallel.

   This targets nf-next because these are long-standing issues."
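
   A minimal sketch of that pattern, for illustration only (my_entry and
   my_lookup are made-up names; list_for_each_entry_rcu() and the nf_tables
   helper lockdep_commit_lock_is_held(), which wraps
   lockdep_is_held(&nft_pernet(net)->commit_mutex), are the real interfaces
   the diffs below use, and the snippet assumes the nf_tables_api.c context
   where that helper is defined):

	struct my_entry {
		struct list_head	list;
		u64			handle;
	};

	static struct my_entry *my_lookup(const struct net *net,
					  struct list_head *head, u64 handle)
	{
		struct my_entry *e;

		/* The optional "cond" argument tells the RCU list-debugging
		 * checks that holding the nftables commit mutex, and not only
		 * rcu_read_lock(), makes this traversal legitimate.
		 */
		list_for_each_entry_rcu(e, head, list,
					lockdep_commit_lock_is_held(net)) {
			if (e->handle == handle)
				return e;
		}
		return NULL;
	}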

netfilter pull request 24-11-07

* tag 'nf-next-24-11-07' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
  netfilter: nf_tables: must hold rcu read lock while iterating object type list
  netfilter: nf_tables: must hold rcu read lock while iterating expression type list
  netfilter: nf_tables: avoid false-positive lockdep splats with basechain hook
  netfilter: nf_tables: avoid false-positive lockdep splats in set walker
  netfilter: nf_tables: avoid false-positive lockdep splats with flowtables
  netfilter: nf_tables: avoid false-positive lockdep splats with sets
  netfilter: nf_tables: avoid false-positive lockdep splat on rule deletion
  netfilter: nf_tables: prefer nft_trans_elem_alloc helper
  netfilter: nf_tables: replace deprecated strncpy with strscpy_pad
  netfilter: nf_tables: Fix percpu address space issues in nf_tables_api.c
  netfilter: Make legacy configs user selectable
====================

Link: https://patch.msgid.link/20241106234625.168468-1-pablo@netfilter.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Paolo Abeni 2024-11-07 12:46:03 +01:00
commit 17bcfe6637
9 changed files with 120 additions and 69 deletions
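
The percpu conversions in the nft_stats_alloc(), nf_tables_addchain() and
nf_tables_updchain() hunks below encode error values in __percpu pointers
with ERR_PTR_PCPU(), IS_ERR_PCPU() and PTR_ERR_PCPU() rather than the plain
ERR_PTR() family; that is what silences the sparse address-space warnings
mentioned above. A minimal sketch of the pairing, using a hypothetical
example_stats_alloc() modelled on the real nft_stats_alloc() (again assuming
the nf_tables_api.c context):

	static struct nft_stats __percpu *example_stats_alloc(void)
	{
		struct nft_stats __percpu *stats;

		stats = netdev_alloc_pcpu_stats(struct nft_stats);
		if (!stats)
			/* encode the error in the __percpu address space */
			return ERR_PTR_PCPU(-ENOMEM);

		return stats;
	}

	static int example_caller(void)
	{
		struct nft_stats __percpu *stats = example_stats_alloc();

		if (IS_ERR_PCPU(stats))
			/* decode without casting away __percpu */
			return PTR_ERR_PCPU(stats);

		free_percpu(stats);
		return 0;
	}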

View File

@@ -1463,7 +1463,8 @@ struct nft_flowtable {
 	struct nf_flowtable		data;
 };
 
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+					   const struct nft_table *table,
 					   const struct nlattr *nla,
 					   u8 genmask);

View File

@@ -41,7 +41,13 @@ config NF_CONNTRACK_BRIDGE
 
 # old sockopt interface and eval loop
 config BRIDGE_NF_EBTABLES_LEGACY
-	tristate
+	tristate "Legacy EBTABLES support"
+	depends on BRIDGE && NETFILTER_XTABLES
+	default n
+	help
+	  Legacy ebtables packet/frame classifier.
+	  This is not needed if you are using ebtables over nftables
+	  (iptables-nft).
 
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"

View File

@@ -63,7 +63,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
 		return nft_meta_get_eval(expr, regs, pkt);
 	}
 
-	strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
+	strscpy_pad((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
 	return;
 err:
 	regs->verdict.code = NFT_BREAK;

View File

@@ -12,7 +12,13 @@ config NF_DEFRAG_IPV4
 
 # old sockopt interface and eval loop
 config IP_NF_IPTABLES_LEGACY
-	tristate
+	tristate "Legacy IP tables support"
+	default n
+	select NETFILTER_XTABLES
+	help
+	  iptables is a legacy packet classifier.
+	  This is not needed if you are using iptables over nftables
+	  (iptables-nft).
 
 config NF_SOCKET_IPV4
 	tristate "IPv4 socket lookup support"
@@ -318,7 +324,13 @@ endif # IP_NF_IPTABLES
 
 # ARP tables
 config IP_NF_ARPTABLES
-	tristate
+	tristate "Legacy ARPTABLES support"
+	depends on NETFILTER_XTABLES
+	default n
+	help
+	  arptables is a legacy packet classifier.
+	  This is not needed if you are using arptables over nftables
+	  (iptables-nft).
 
 config NFT_COMPAT_ARP
 	tristate

View File

@@ -8,7 +8,14 @@ menu "IPv6: Netfilter Configuration"
 
 # old sockopt interface and eval loop
 config IP6_NF_IPTABLES_LEGACY
-	tristate
+	tristate "Legacy IP6 tables support"
+	depends on INET && IPV6
+	select NETFILTER_XTABLES
+	default n
+	help
+	  ip6tables is a legacy packet classifier.
+	  This is not needed if you are using iptables over nftables
+	  (iptables-nft).
 
 config NF_SOCKET_IPV6
 	tristate "IPv6 socket lookup support"

View File

@@ -1824,7 +1824,8 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 	return -ENOSPC;
 }
 
-static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
+static int nft_dump_basechain_hook(struct sk_buff *skb,
+				   const struct net *net, int family,
 				   const struct nft_base_chain *basechain,
 				   const struct list_head *hook_list)
 {
@@ -1849,7 +1850,8 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
 	if (!hook_list)
 		hook_list = &basechain->hook_list;
 
-	list_for_each_entry_rcu(hook, hook_list, list) {
+	list_for_each_entry_rcu(hook, hook_list, list,
+				lockdep_commit_lock_is_held(net)) {
 		if (!first)
 			first = hook;
 
@@ -1900,7 +1902,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 		const struct nft_base_chain *basechain = nft_base_chain(chain);
 		struct nft_stats __percpu *stats;
 
-		if (nft_dump_basechain_hook(skb, family, basechain, hook_list))
+		if (nft_dump_basechain_hook(skb, net, family, basechain, hook_list))
 			goto nla_put_failure;
 
 		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
@@ -2082,14 +2084,14 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	err = nla_parse_nested_deprecated(tb, NFTA_COUNTER_MAX, attr,
 					  nft_counter_policy, NULL);
 	if (err < 0)
-		return ERR_PTR(err);
+		return ERR_PTR_PCPU(err);
 
 	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR_PCPU(-EINVAL);
 
 	newstats = netdev_alloc_pcpu_stats(struct nft_stats);
 	if (newstats == NULL)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR_PCPU(-ENOMEM);
 
 	/* Restore old counters on this cpu, no problem. Per-cpu statistics
 	 * are not exposed to userspace.
@@ -2533,10 +2535,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 		if (nla[NFTA_CHAIN_COUNTERS]) {
 			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
-			if (IS_ERR(stats)) {
+			if (IS_ERR_PCPU(stats)) {
 				nft_chain_release_hook(&hook);
 				kfree(basechain);
-				return PTR_ERR(stats);
+				return PTR_ERR_PCPU(stats);
 			}
 			rcu_assign_pointer(basechain->stats, stats);
 		}
@@ -2650,7 +2652,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 	struct nft_table *table = ctx->table;
 	struct nft_chain *chain = ctx->chain;
 	struct nft_chain_hook hook = {};
-	struct nft_stats *stats = NULL;
+	struct nft_stats __percpu *stats = NULL;
 	struct nft_hook *h, *next;
 	struct nf_hook_ops *ops;
 	struct nft_trans *trans;
@@ -2746,8 +2748,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 		}
 
 		stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
-		if (IS_ERR(stats)) {
-			err = PTR_ERR(stats);
+		if (IS_ERR_PCPU(stats)) {
+			err = PTR_ERR_PCPU(stats);
 			goto err_hooks;
 		}
 	}
@@ -3294,25 +3296,37 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
 	if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
 		return -EINVAL;
 
-	type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
-	if (!type)
-		return -ENOENT;
+	rcu_read_lock();
 
-	if (!type->inner_ops)
-		return -EOPNOTSUPP;
+	type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
+	if (!type) {
+		err = -ENOENT;
+		goto out_unlock;
+	}
+
+	if (!type->inner_ops) {
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	}
 
 	err = nla_parse_nested_deprecated(info->tb, type->maxattr,
 					  tb[NFTA_EXPR_DATA],
 					  type->policy, NULL);
 	if (err < 0)
-		goto err_nla_parse;
+		goto out_unlock;
 
 	info->attr = nla;
 	info->ops = type->inner_ops;
 
+	/* No module reference will be taken on type->owner.
+	 * Presence of type->inner_ops implies that the expression
+	 * is builtin, so it cannot go away.
	 */
+	rcu_read_unlock();
+
 	return 0;
 
-err_nla_parse:
+out_unlock:
+	rcu_read_unlock();
 	return err;
 }
@@ -3411,13 +3425,15 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
  * Rules
  */
 
-static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
+static struct nft_rule *__nft_rule_lookup(const struct net *net,
+					  const struct nft_chain *chain,
 					  u64 handle)
 {
 	struct nft_rule *rule;
 
 	// FIXME: this sucks
-	list_for_each_entry_rcu(rule, &chain->rules, list) {
+	list_for_each_entry_rcu(rule, &chain->rules, list,
+				lockdep_commit_lock_is_held(net)) {
 		if (handle == rule->handle)
 			return rule;
 	}
@@ -3425,13 +3441,14 @@ static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
 	return ERR_PTR(-ENOENT);
 }
 
-static struct nft_rule *nft_rule_lookup(const struct nft_chain *chain,
+static struct nft_rule *nft_rule_lookup(const struct net *net,
+					const struct nft_chain *chain,
 					const struct nlattr *nla)
 {
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
-	return __nft_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+	return __nft_rule_lookup(net, chain, be64_to_cpu(nla_get_be64(nla)));
 }
 
 static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
@@ -3732,7 +3749,7 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)
 	return 0;
 }
 
-/* called with rcu_read_lock held */
+/* Caller must hold rcu read lock or transaction mutex */
 static struct sk_buff *
 nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
 			 const struct nlattr * const nla[], bool reset)
@@ -3759,7 +3776,7 @@ nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
 		return ERR_CAST(chain);
 	}
 
-	rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+	rule = nft_rule_lookup(net, chain, nla[NFTA_RULE_HANDLE]);
 	if (IS_ERR(rule)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
 		return ERR_CAST(rule);
@@ -3983,7 +4000,8 @@ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
 	struct nft_set_ext *ext;
 	int ret = 0;
 
-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+	list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+				lockdep_commit_lock_is_held(ctx->net)) {
 		ext = nft_set_elem_ext(set, catchall->elem);
 		if (!nft_set_elem_active(ext, dummy_iter.genmask))
 			continue;
@@ -4057,7 +4075,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 
 	if (nla[NFTA_RULE_HANDLE]) {
 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
-		rule = __nft_rule_lookup(chain, handle);
+		rule = __nft_rule_lookup(net, chain, handle);
 		if (IS_ERR(rule)) {
 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
 			return PTR_ERR(rule);
@@ -4079,7 +4097,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 
 	if (nla[NFTA_RULE_POSITION]) {
 		pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
-		old_rule = __nft_rule_lookup(chain, pos_handle);
+		old_rule = __nft_rule_lookup(net, chain, pos_handle);
 		if (IS_ERR(old_rule)) {
 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
 			return PTR_ERR(old_rule);
@@ -4296,7 +4314,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
 
 	if (chain) {
 		if (nla[NFTA_RULE_HANDLE]) {
-			rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+			rule = nft_rule_lookup(info->net, chain, nla[NFTA_RULE_HANDLE]);
 			if (IS_ERR(rule)) {
 				if (PTR_ERR(rule) == -ENOENT &&
 				    NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
@@ -4456,7 +4474,8 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
 	[NFTA_SET_DESC_CONCAT]	= NLA_POLICY_NESTED_ARRAY(nft_concat_policy),
 };
 
-static struct nft_set *nft_set_lookup(const struct nft_table *table,
+static struct nft_set *nft_set_lookup(const struct net *net,
+				      const struct nft_table *table,
 				      const struct nlattr *nla, u8 genmask)
 {
 	struct nft_set *set;
@@ -4464,7 +4483,8 @@ static struct nft_set *nft_set_lookup(const struct nft_table *table,
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry_rcu(set, &table->sets, list) {
+	list_for_each_entry_rcu(set, &table->sets, list,
+				lockdep_commit_lock_is_held(net)) {
 		if (!nla_strcmp(nla, set->name) &&
 		    nft_active_genmask(set, genmask))
 			return set;
@@ -4514,7 +4534,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
 {
 	struct nft_set *set;
 
-	set = nft_set_lookup(table, nla_set_name, genmask);
+	set = nft_set_lookup(net, table, nla_set_name, genmask);
 	if (IS_ERR(set)) {
 		if (!nla_set_id)
 			return set;
@@ -4890,7 +4910,7 @@ static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
 	if (!nla[NFTA_SET_TABLE])
 		return -EINVAL;
 
-	set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
+	set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
 	if (IS_ERR(set)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
 		return PTR_ERR(set);
@@ -5226,7 +5246,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 
 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
 
-	set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
+	set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
 	if (IS_ERR(set)) {
 		if (PTR_ERR(set) != -ENOENT) {
 			NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
@@ -5428,7 +5448,7 @@ static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
 		set = nft_set_lookup_byhandle(table, attr, genmask);
 	} else {
 		attr = nla[NFTA_SET_NAME];
-		set = nft_set_lookup(table, attr, genmask);
+		set = nft_set_lookup(net, table, attr, genmask);
 	}
 
 	if (IS_ERR(set)) {
@@ -5492,7 +5512,8 @@ static int nft_set_catchall_bind_check(const struct nft_ctx *ctx,
 	struct nft_set_ext *ext;
 	int ret = 0;
 
-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+	list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+				lockdep_commit_lock_is_held(ctx->net)) {
 		ext = nft_set_elem_ext(set, catchall->elem);
 		if (!nft_set_elem_active(ext, genmask))
 			continue;
@@ -6258,7 +6279,7 @@ static int nft_set_dump_ctx_init(struct nft_set_dump_ctx *dump_ctx,
 		return PTR_ERR(table);
 	}
 
-	set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+	set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
 	if (IS_ERR(set)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
 		return PTR_ERR(set);
@@ -6409,7 +6430,7 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
 	nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
-static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
+static struct nft_trans *nft_trans_elem_alloc(const struct nft_ctx *ctx,
 					      int msg_type,
 					      struct nft_set *set)
 {
@@ -7471,13 +7492,11 @@ static int __nft_set_catchall_flush(const struct nft_ctx *ctx,
 {
 	struct nft_trans *trans;
 
-	trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
-				    sizeof(struct nft_trans_elem), GFP_KERNEL);
+	trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
 	if (!trans)
 		return -ENOMEM;
 
 	nft_setelem_data_deactivate(ctx->net, set, elem_priv);
-	nft_trans_elem_set(trans) = set;
 	nft_trans_elem_priv(trans) = elem_priv;
 	nft_trans_commit_list_add_tail(ctx->net, trans);
 
@@ -7492,7 +7511,8 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
 	struct nft_set_ext *ext;
 	int ret = 0;
 
-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+	list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+				lockdep_commit_lock_is_held(ctx->net)) {
 		ext = nft_set_elem_ext(set, catchall->elem);
 		if (!nft_set_elem_active(ext, genmask))
 			continue;
@@ -7542,7 +7562,7 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
 		return PTR_ERR(table);
 	}
 
-	set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+	set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
 	if (IS_ERR(set)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
 		return PTR_ERR(set);
@@ -7789,9 +7809,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
 	struct nft_trans *trans;
 	int err = -ENOMEM;
 
-	if (!try_module_get(type->owner))
-		return -ENOENT;
-
+	/* caller must have obtained type->owner reference. */
 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
 				sizeof(struct nft_trans_obj));
 	if (!trans)
@@ -7859,15 +7877,16 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
 
-		type = __nft_obj_type_get(objtype, family);
-		if (WARN_ON_ONCE(!type))
-			return -ENOENT;
-
 		if (!obj->ops->update)
 			return 0;
 
+		type = nft_obj_type_get(net, objtype, family);
+		if (WARN_ON_ONCE(IS_ERR(type)))
+			return PTR_ERR(type);
+
 		nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
 
+		/* type->owner reference is put when transaction object is released. */
 		return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
 	}
 
@@ -8103,7 +8122,7 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 	return 0;
 }
 
-/* called with rcu_read_lock held */
+/* Caller must hold rcu read lock or transaction mutex */
 static struct sk_buff *
 nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
 			const struct nlattr * const nla[], bool reset)
@@ -8372,12 +8391,14 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
 	[NFTA_FLOWTABLE_FLAGS]	= { .type = NLA_U32 },
 };
 
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+					   const struct nft_table *table,
 					   const struct nlattr *nla, u8 genmask)
 {
 	struct nft_flowtable *flowtable;
 
-	list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+	list_for_each_entry_rcu(flowtable, &table->flowtables, list,
+				lockdep_commit_lock_is_held(net)) {
 		if (!nla_strcmp(nla, flowtable->name) &&
 		    nft_active_genmask(flowtable, genmask))
 			return flowtable;
@@ -8733,7 +8754,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
 		return PTR_ERR(table);
 	}
 
-	flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+	flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
 					 genmask);
 	if (IS_ERR(flowtable)) {
 		err = PTR_ERR(flowtable);
@@ -8927,7 +8948,7 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
 		flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask);
 	} else {
 		attr = nla[NFTA_FLOWTABLE_NAME];
-		flowtable = nft_flowtable_lookup(table, attr, genmask);
+		flowtable = nft_flowtable_lookup(net, table, attr, genmask);
 	}
 
 	if (IS_ERR(flowtable)) {
@@ -8997,7 +9018,8 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
 	if (!hook_list)
 		hook_list = &flowtable->hook_list;
 
-	list_for_each_entry_rcu(hook, hook_list, list) {
+	list_for_each_entry_rcu(hook, hook_list, list,
+				lockdep_commit_lock_is_held(net)) {
 		if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name))
 			goto nla_put_failure;
 	}
@@ -9139,7 +9161,7 @@ static int nf_tables_getflowtable(struct sk_buff *skb,
 		return PTR_ERR(table);
 	}
 
-	flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+	flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
 					 genmask);
 	if (IS_ERR(flowtable)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);

View File

@@ -409,8 +409,8 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
 	if (!tb[NFTA_FLOW_TABLE_NAME])
 		return -EINVAL;
 
-	flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
-					 genmask);
+	flowtable = nft_flowtable_lookup(ctx->net, ctx->table,
+					 tb[NFTA_FLOW_TABLE_NAME], genmask);
 	if (IS_ERR(flowtable))
 		return PTR_ERR(flowtable);

View File

@@ -88,13 +88,15 @@ bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
 }
 
 static struct nft_bitmap_elem *
-nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+nft_bitmap_elem_find(const struct net *net,
+		     const struct nft_set *set, struct nft_bitmap_elem *this,
 		     u8 genmask)
 {
 	const struct nft_bitmap *priv = nft_set_priv(set);
 	struct nft_bitmap_elem *be;
 
-	list_for_each_entry_rcu(be, &priv->list, head) {
+	list_for_each_entry_rcu(be, &priv->list, head,
+				lockdep_is_held(&nft_pernet(net)->commit_mutex)) {
 		if (memcmp(nft_set_ext_key(&be->ext),
 			   nft_set_ext_key(&this->ext), set->klen) ||
 		    !nft_set_elem_active(&be->ext, genmask))
@@ -132,7 +134,7 @@ static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
 	u8 genmask = nft_genmask_next(net);
 	u32 idx, off;
 
-	be = nft_bitmap_elem_find(set, new, genmask);
+	be = nft_bitmap_elem_find(net, set, new, genmask);
 	if (be) {
 		*elem_priv = &be->priv;
 		return -EEXIST;
@@ -201,7 +203,7 @@ nft_bitmap_deactivate(const struct net *net, const struct nft_set *set,
 
 	nft_bitmap_location(set, elem->key.val.data, &idx, &off);
 
-	be = nft_bitmap_elem_find(set, this, genmask);
+	be = nft_bitmap_elem_find(net, set, this, genmask);
 	if (!be)
 		return NULL;

View File

@@ -647,7 +647,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	int i;
 
 	for (i = 0; i < priv->buckets; i++) {
-		hlist_for_each_entry_rcu(he, &priv->table[i], node) {
+		hlist_for_each_entry_rcu(he, &priv->table[i], node,
+					 lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
 			if (iter->count < iter->skip)
 				goto cont;