net: bridge: multicast: factor out bridge multicast context

Factor out the bridge's global multicast context into a separate
structure which will later be used for per-vlan global context.
No functional changes intended.

Signed-off-by: Nikolay Aleksandrov <nikolay@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Nikolay Aleksandrov on 2021-07-19 20:06:24 +03:00; committed by David S. Miller
parent 9632233e7d
commit d3d065c003
5 changed files with 333 additions and 273 deletions
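
Before the hunks, a quick orientation: the per-bridge multicast state (router type, IGMP/MLD versions, query timers and intervals, querier state and router lists) moves out of struct net_bridge into an embedded struct net_bridge_mcast reachable as br->multicast_ctx, and helpers that only need multicast state now take a struct net_bridge_mcast * ("brmctx") instead of the bridge itself. The following minimal, standalone C sketch shows only that access-pattern change; it is not the kernel code (the structs are trimmed to two representative fields and the helper name is made up for illustration).

/* Standalone sketch of the new access pattern; NOT the kernel structs.
 * Only two representative fields are kept, and the helper name below
 * (example_mcast_router_enabled) is invented purely for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct net_bridge_mcast {			/* factored-out multicast context */
	unsigned char multicast_router;		/* previously a net_bridge field */
	unsigned long multicast_query_interval;	/* previously a net_bridge field */
};

struct net_bridge {
	struct net_bridge_mcast multicast_ctx;	/* embedded global context */
};

/* Helpers take the context rather than the whole bridge, so the same code
 * can later operate on a per-vlan context as well as the global one.
 */
static bool example_mcast_router_enabled(const struct net_bridge_mcast *brmctx)
{
	return brmctx->multicast_router != 0;
}

int main(void)
{
	struct net_bridge br = {
		.multicast_ctx = {
			.multicast_router = 1,
			.multicast_query_interval = 12500,
		},
	};

	/* Call sites move from br.multicast_router to br.multicast_ctx.multicast_router. */
	printf("router=%d enabled=%d\n",
	       (int)br.multicast_ctx.multicast_router,
	       (int)example_mcast_router_enabled(&br.multicast_ctx));
	return 0;
}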

net/bridge/br_mdb.c

@@ -16,13 +16,13 @@
 #include "br_private.h"
 
-static bool br_rports_have_mc_router(struct net_bridge *br)
+static bool br_rports_have_mc_router(struct net_bridge_mcast *brmctx)
 {
 #if IS_ENABLED(CONFIG_IPV6)
-	return !hlist_empty(&br->ip4_mc_router_list) ||
-	       !hlist_empty(&br->ip6_mc_router_list);
+	return !hlist_empty(&brmctx->ip4_mc_router_list) ||
+	       !hlist_empty(&brmctx->ip6_mc_router_list);
 #else
-	return !hlist_empty(&br->ip4_mc_router_list);
+	return !hlist_empty(&brmctx->ip4_mc_router_list);
 #endif
 }
@@ -54,10 +54,10 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 	struct nlattr *nest, *port_nest;
 	struct net_bridge_port *p;
 
-	if (!br->multicast_router)
+	if (!br->multicast_ctx.multicast_router)
 		return 0;
-	if (!br_rports_have_mc_router(br))
+	if (!br_rports_have_mc_router(&br->multicast_ctx))
 		return 0;
 
 	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
@@ -240,7 +240,7 @@ static int __mdb_fill_info(struct sk_buff *skb,
 	switch (mp->addr.proto) {
 	case htons(ETH_P_IP):
-		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
+		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
 		if (mp->addr.src.ip4) {
 			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
 					    mp->addr.src.ip4))
@@ -250,7 +250,7 @@ static int __mdb_fill_info(struct sk_buff *skb,
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
-		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
+		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
 		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
 			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
 					     &mp->addr.src.ip6))
@@ -483,7 +483,7 @@ static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
 		/* MDBA_MDB_EATTR_SOURCE */
 		if (pg->key.addr.src.ip4)
 			nlmsg_size += nla_total_size(sizeof(__be32));
-		if (pg->key.port->br->multicast_igmp_version == 2)
+		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
 			goto out;
 		addr_size = sizeof(__be32);
 		break;
@@ -492,7 +492,7 @@ static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
 		/* MDBA_MDB_EATTR_SOURCE */
 		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
 			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
-		if (pg->key.port->br->multicast_mld_version == 1)
+		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
 			goto out;
 		addr_size = sizeof(struct in6_addr);
 		break;
@@ -1084,7 +1084,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	}
 	rcu_assign_pointer(*pp, p);
 	if (entry->state == MDB_TEMPORARY)
-		mod_timer(&p->timer, now + br->multicast_membership_interval);
+		mod_timer(&p->timer,
+			  now + br->multicast_ctx.multicast_membership_interval);
 	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
 
 	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
 	 * added to all S,G entries for proper replication, if we are adding
net/bridge/br_multicast.c: diff not shown here (too large for this view)

net/bridge/br_netlink.c

@@ -1324,49 +1324,49 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
 
-		br->multicast_last_member_count = val;
+		br->multicast_ctx.multicast_last_member_count = val;
 	}
 
 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
 
-		br->multicast_startup_query_count = val;
+		br->multicast_ctx.multicast_startup_query_count = val;
 	}
 
 	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
 
-		br->multicast_last_member_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
 
-		br->multicast_membership_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
 
-		br->multicast_querier_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
 
-		br->multicast_query_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
 
-		br->multicast_query_response_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
 
-		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
 	}
 
 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
@@ -1566,7 +1566,8 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 		return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
+	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
+		       br->multicast_ctx.multicast_router) ||
 	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
 		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
@@ -1578,38 +1579,38 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
 	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
-			br->multicast_last_member_count) ||
+			br->multicast_ctx.multicast_last_member_count) ||
 	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
-			br->multicast_startup_query_count) ||
+			br->multicast_ctx.multicast_startup_query_count) ||
 	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
-		       br->multicast_igmp_version))
+		       br->multicast_ctx.multicast_igmp_version))
 		return -EMSGSIZE;
 #if IS_ENABLED(CONFIG_IPV6)
 	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
-		       br->multicast_mld_version))
+		       br->multicast_ctx.multicast_mld_version))
 		return -EMSGSIZE;
 #endif
-	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;
-	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;
-	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;
-	clockval = jiffies_to_clock_t(br->multicast_query_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;
-	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;
-	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
+	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
 			      IFLA_BR_PAD))
 		return -EMSGSIZE;

net/bridge/br_private.h

@@ -106,6 +106,40 @@ struct net_bridge_mcast_port {
 #endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
 };
 
+/* net_bridge_mcast must be always defined due to forwarding stubs */
+struct net_bridge_mcast {
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	struct net_bridge		*br;
+
+	u32				multicast_last_member_count;
+	u32				multicast_startup_query_count;
+
+	u8				multicast_igmp_version;
+	u8				multicast_router;
+#if IS_ENABLED(CONFIG_IPV6)
+	u8				multicast_mld_version;
+#endif
+	unsigned long			multicast_last_member_interval;
+	unsigned long			multicast_membership_interval;
+	unsigned long			multicast_querier_interval;
+	unsigned long			multicast_query_interval;
+	unsigned long			multicast_query_response_interval;
+	unsigned long			multicast_startup_query_interval;
+
+	struct hlist_head		ip4_mc_router_list;
+	struct timer_list		ip4_mc_router_timer;
+	struct bridge_mcast_other_query	ip4_other_query;
+	struct bridge_mcast_own_query	ip4_own_query;
+	struct bridge_mcast_querier	ip4_querier;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct hlist_head		ip6_mc_router_list;
+	struct timer_list		ip6_mc_router_timer;
+	struct bridge_mcast_other_query	ip6_other_query;
+	struct bridge_mcast_own_query	ip6_own_query;
+	struct bridge_mcast_querier	ip6_querier;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
+};
+
 struct br_tunnel_info {
 	__be64				tunnel_id;
 	struct metadata_dst __rcu	*tunnel_dst;
@@ -437,25 +471,14 @@ struct net_bridge {
 		BR_USER_STP,		/* new RSTP in userspace */
 	} stp_enabled;
 
+	struct net_bridge_mcast		multicast_ctx;
+
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	struct bridge_mcast_stats	__percpu *mcast_stats;
+
 	u32				hash_max;
 
-	u32				multicast_last_member_count;
-	u32				multicast_startup_query_count;
-
-	u8				multicast_igmp_version;
-	u8				multicast_router;
-#if IS_ENABLED(CONFIG_IPV6)
-	u8				multicast_mld_version;
-#endif
 	spinlock_t			multicast_lock;
 
-	unsigned long			multicast_last_member_interval;
-	unsigned long			multicast_membership_interval;
-	unsigned long			multicast_querier_interval;
-	unsigned long			multicast_query_interval;
-	unsigned long			multicast_query_response_interval;
-	unsigned long			multicast_startup_query_interval;
-
 	struct rhashtable		mdb_hash_tbl;
 	struct rhashtable		sg_port_tbl;
@@ -463,19 +486,6 @@ struct net_bridge {
 	struct hlist_head		mcast_gc_list;
 	struct hlist_head		mdb_list;
 
-	struct hlist_head		ip4_mc_router_list;
-	struct timer_list		ip4_mc_router_timer;
-	struct bridge_mcast_other_query	ip4_other_query;
-	struct bridge_mcast_own_query	ip4_own_query;
-	struct bridge_mcast_querier	ip4_querier;
-	struct bridge_mcast_stats	__percpu *mcast_stats;
-#if IS_ENABLED(CONFIG_IPV6)
-	struct hlist_head		ip6_mc_router_list;
-	struct timer_list		ip6_mc_router_timer;
-	struct bridge_mcast_other_query	ip6_other_query;
-	struct bridge_mcast_own_query	ip6_own_query;
-	struct bridge_mcast_querier	ip6_querier;
-#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 	struct work_struct		mcast_gc_work;
 #endif
@@ -880,16 +890,20 @@ static inline bool br_group_is_l2(const struct br_ip *group)
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 static inline struct hlist_node *
-br_multicast_get_first_rport_node(struct net_bridge *b, struct sk_buff *skb) {
+br_multicast_get_first_rport_node(struct net_bridge *br, struct sk_buff *skb)
+{
+	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
+
 #if IS_ENABLED(CONFIG_IPV6)
 	if (skb->protocol == htons(ETH_P_IPV6))
-		return rcu_dereference(hlist_first_rcu(&b->ip6_mc_router_list));
+		return rcu_dereference(hlist_first_rcu(&brmctx->ip6_mc_router_list));
 #endif
-	return rcu_dereference(hlist_first_rcu(&b->ip4_mc_router_list));
+	return rcu_dereference(hlist_first_rcu(&brmctx->ip4_mc_router_list));
 }
 
 static inline struct net_bridge_port *
-br_multicast_rport_from_node_skb(struct hlist_node *rp, struct sk_buff *skb) {
+br_multicast_rport_from_node_skb(struct hlist_node *rp, struct sk_buff *skb)
+{
 	struct net_bridge_mcast_port *mctx;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -907,15 +921,15 @@ br_multicast_rport_from_node_skb(struct hlist_node *rp, struct sk_buff *skb) {
 	return NULL;
 }
 
-static inline bool br_ip4_multicast_is_router(struct net_bridge *br)
+static inline bool br_ip4_multicast_is_router(struct net_bridge_mcast *brmctx)
 {
-	return timer_pending(&br->ip4_mc_router_timer);
+	return timer_pending(&brmctx->ip4_mc_router_timer);
 }
 
-static inline bool br_ip6_multicast_is_router(struct net_bridge *br)
+static inline bool br_ip6_multicast_is_router(struct net_bridge_mcast *brmctx)
 {
 #if IS_ENABLED(CONFIG_IPV6)
-	return timer_pending(&br->ip6_mc_router_timer);
+	return timer_pending(&brmctx->ip6_mc_router_timer);
 #else
 	return false;
 #endif
@@ -924,18 +938,20 @@ static inline bool br_ip6_multicast_is_router(struct net_bridge *br)
 static inline bool
 br_multicast_is_router(struct net_bridge *br, struct sk_buff *skb)
 {
-	switch (br->multicast_router) {
+	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
+
+	switch (brmctx->multicast_router) {
 	case MDB_RTR_TYPE_PERM:
 		return true;
 	case MDB_RTR_TYPE_TEMP_QUERY:
 		if (skb) {
 			if (skb->protocol == htons(ETH_P_IP))
-				return br_ip4_multicast_is_router(br);
+				return br_ip4_multicast_is_router(brmctx);
 			else if (skb->protocol == htons(ETH_P_IPV6))
-				return br_ip6_multicast_is_router(br);
+				return br_ip6_multicast_is_router(brmctx);
 		} else {
-			return br_ip4_multicast_is_router(br) ||
-			       br_ip6_multicast_is_router(br);
+			return br_ip4_multicast_is_router(brmctx) ||
+			       br_ip6_multicast_is_router(brmctx);
 		}
 		fallthrough;
 	default:
@@ -970,11 +986,11 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 	switch (eth->h_proto) {
 	case (htons(ETH_P_IP)):
 		return __br_multicast_querier_exists(br,
-			&br->ip4_other_query, false);
+			&br->multicast_ctx.ip4_other_query, false);
 #if IS_ENABLED(CONFIG_IPV6)
 	case (htons(ETH_P_IPV6)):
 		return __br_multicast_querier_exists(br,
-			&br->ip6_other_query, true);
+			&br->multicast_ctx.ip6_other_query, true);
 #endif
 	default:
 		return !!mdb && br_group_is_l2(&mdb->addr);
@@ -1000,10 +1016,10 @@ static inline bool br_multicast_should_handle_mode(const struct net_bridge *br,
 {
 	switch (proto) {
 	case htons(ETH_P_IP):
-		return !!(br->multicast_igmp_version == 3);
+		return !!(br->multicast_ctx.multicast_igmp_version == 3);
 #if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
-		return !!(br->multicast_mld_version == 2);
+		return !!(br->multicast_ctx.multicast_mld_version == 2);
 #endif
 	default:
 		return false;
@@ -1017,15 +1033,15 @@ static inline int br_multicast_igmp_type(const struct sk_buff *skb)
 static inline unsigned long br_multicast_lmqt(const struct net_bridge *br)
 {
-	return br->multicast_last_member_interval *
-	       br->multicast_last_member_count;
+	return br->multicast_ctx.multicast_last_member_interval *
+	       br->multicast_ctx.multicast_last_member_count;
 }
 
 static inline unsigned long br_multicast_gmi(const struct net_bridge *br)
 {
 	/* use the RFC default of 2 for QRV */
-	return 2 * br->multicast_query_interval +
-	       br->multicast_query_response_interval;
+	return 2 * br->multicast_ctx.multicast_query_interval +
+	       br->multicast_ctx.multicast_query_response_interval;
 }
 
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,

net/bridge/br_sysfs_br.c

@@ -384,7 +384,7 @@ static ssize_t multicast_router_show(struct device *d,
 				     struct device_attribute *attr, char *buf)
 {
 	struct net_bridge *br = to_bridge(d);
-	return sprintf(buf, "%d\n", br->multicast_router);
+	return sprintf(buf, "%d\n", br->multicast_ctx.multicast_router);
 }
 
 static int set_multicast_router(struct net_bridge *br, unsigned long val,
@@ -514,7 +514,7 @@ static ssize_t multicast_igmp_version_show(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 
-	return sprintf(buf, "%u\n", br->multicast_igmp_version);
+	return sprintf(buf, "%u\n", br->multicast_ctx.multicast_igmp_version);
 }
 
 static int set_multicast_igmp_version(struct net_bridge *br, unsigned long val,
@@ -536,13 +536,13 @@ static ssize_t multicast_last_member_count_show(struct device *d,
 						char *buf)
 {
 	struct net_bridge *br = to_bridge(d);
 
-	return sprintf(buf, "%u\n", br->multicast_last_member_count);
+	return sprintf(buf, "%u\n", br->multicast_ctx.multicast_last_member_count);
 }
 
 static int set_last_member_count(struct net_bridge *br, unsigned long val,
 				 struct netlink_ext_ack *extack)
 {
-	br->multicast_last_member_count = val;
+	br->multicast_ctx.multicast_last_member_count = val;
 	return 0;
 }
@@ -558,13 +558,13 @@ static ssize_t multicast_startup_query_count_show(
 	struct device *d, struct device_attribute *attr, char *buf)
 {
 	struct net_bridge *br = to_bridge(d);
 
-	return sprintf(buf, "%u\n", br->multicast_startup_query_count);
+	return sprintf(buf, "%u\n", br->multicast_ctx.multicast_startup_query_count);
 }
 
 static int set_startup_query_count(struct net_bridge *br, unsigned long val,
 				   struct netlink_ext_ack *extack)
 {
-	br->multicast_startup_query_count = val;
+	br->multicast_ctx.multicast_startup_query_count = val;
 	return 0;
 }
@@ -581,13 +581,13 @@ static ssize_t multicast_last_member_interval_show(
 {
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(br->multicast_last_member_interval));
+		       jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval));
 }
 
 static int set_last_member_interval(struct net_bridge *br, unsigned long val,
 				    struct netlink_ext_ack *extack)
 {
-	br->multicast_last_member_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -604,13 +604,13 @@ static ssize_t multicast_membership_interval_show(
 {
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(br->multicast_membership_interval));
+		       jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval));
 }
 
 static int set_membership_interval(struct net_bridge *br, unsigned long val,
 				   struct netlink_ext_ack *extack)
 {
-	br->multicast_membership_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -628,13 +628,13 @@ static ssize_t multicast_querier_interval_show(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(br->multicast_querier_interval));
+		       jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval));
 }
 
 static int set_querier_interval(struct net_bridge *br, unsigned long val,
 				struct netlink_ext_ack *extack)
 {
-	br->multicast_querier_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -652,13 +652,13 @@ static ssize_t multicast_query_interval_show(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(br->multicast_query_interval));
+		       jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval));
 }
 
 static int set_query_interval(struct net_bridge *br, unsigned long val,
 			      struct netlink_ext_ack *extack)
 {
-	br->multicast_query_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -676,13 +676,13 @@ static ssize_t multicast_query_response_interval_show(
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(
 		buf, "%lu\n",
-		jiffies_to_clock_t(br->multicast_query_response_interval));
+		jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval));
 }
 
 static int set_query_response_interval(struct net_bridge *br, unsigned long val,
 				       struct netlink_ext_ack *extack)
 {
-	br->multicast_query_response_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -700,13 +700,13 @@ static ssize_t multicast_startup_query_interval_show(
 	struct net_bridge *br = to_bridge(d);
 
 	return sprintf(
 		buf, "%lu\n",
-		jiffies_to_clock_t(br->multicast_startup_query_interval));
+		jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval));
 }
 
 static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
 				      struct netlink_ext_ack *extack)
 {
-	br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
 	return 0;
 }
@@ -751,7 +751,7 @@ static ssize_t multicast_mld_version_show(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 
-	return sprintf(buf, "%u\n", br->multicast_mld_version);
+	return sprintf(buf, "%u\n", br->multicast_ctx.multicast_mld_version);
 }
 
 static int set_multicast_mld_version(struct net_bridge *br, unsigned long val,