Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2024-12-28 16:53:49 +00:00
xfrm: generalize xdo_dev_state_update_curlft to allow statistics update
In order to allow drivers to fill all statistics, change the name of
xdo_dev_state_update_curlft to be xdo_dev_state_update_stats.

Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 8ff25dac88
commit fd2bc4195d
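For orientation, a minimal sketch of what a driver-side implementation of the renamed callback might look like. It mirrors the shape of the mlx5 changes in the diff below; the "foo" driver, foo_read_hw_counters(), and the choice to fill only the curlft byte/packet counters are illustrative assumptions, not part of this commit.

/*
 * Sketch of a driver callback for the renamed hook. foo_read_hw_counters()
 * is a stand-in for whatever mechanism the real hardware offers to read its
 * accumulated counters; it is not an existing API.
 */
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Hypothetical: fetch accumulated packet/byte counters from the device. */
static void foo_read_hw_counters(struct xfrm_state *x, u64 *packets, u64 *bytes);

static void foo_xfrm_update_stats(struct xfrm_state *x)
{
        u64 packets, bytes;

        /* Only fully offloaded (packet offload) states keep HW counters. */
        if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ ||
            x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
                return;

        foo_read_hw_counters(x, &packets, &bytes);

        /* Current lifetime values, used by the core for soft/hard limit checks. */
        x->curlft.packets = packets;
        x->curlft.bytes = bytes;
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
        /* ... other xdo_* callbacks ... */
        .xdo_dev_state_update_stats = foo_xfrm_update_stats,
};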
@@ -71,9 +71,9 @@ Callbacks to implement
         bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                     struct xfrm_state *x);
         void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+        void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
 
         /* Solely packet offload callbacks */
-        void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
         int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
         void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
         void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
 
 Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
 The HW/driver are responsible to perform it and provide accurate data when
-xdo_dev_state_update_curlft() is called. In case of one of these limits
+xdo_dev_state_update_stats() is called. In case of one of these limits
 occuried, the driver needs to call to xfrm_state_check_expire() to make sure
 that XFRM performs rekeying sequence.
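Continuing the hypothetical "foo" driver from the sketch above: when the hardware signals that a soft or hard limit tripped, the driver is expected to funnel that event into xfrm_state_check_expire(), which (per the hunk further down) re-reads the offloaded statistics and raises the expire notification so rekeying can start. How the event is mapped back to its xfrm_state is device specific and omitted here.

static void foo_handle_limit_event(struct xfrm_state *x)
{
        /*
         * xfrm_state_check_expire() refreshes x->curlft through
         * xdo_dev_state_update_stats() and then checks the soft/hard
         * limits, notifying the keying daemon if one was reached.
         */
        spin_lock_bh(&x->lock);
        xfrm_state_check_expire(x);
        spin_unlock_bh(&x->lock);
}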
@@ -984,7 +984,7 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
         queue_work(sa_entry->ipsec->wq, &work->work);
 }
 
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
 {
         struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
         struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
@@ -993,7 +993,8 @@ static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
         lockdep_assert(lockdep_is_held(&x->lock) ||
                        lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
 
-        if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+        if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ ||
+            x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
                 return;
 
         mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
@@ -1156,7 +1157,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
         .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
         .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 
-        .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+        .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
         .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
         .xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
         .xdo_dev_policy_free = mlx5e_xfrm_free_policy,
@@ -1062,7 +1062,7 @@ struct xfrmdev_ops {
         bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                     struct xfrm_state *x);
         void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
-        void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+        void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
         int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
         void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
         void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -1578,21 +1578,18 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                                            unsigned short family);
 int xfrm_state_check_expire(struct xfrm_state *x);
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
 {
         struct xfrm_dev_offload *xdo = &x->xso;
         struct net_device *dev = xdo->dev;
 
-        if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
-                return;
-
         if (dev && dev->xfrmdev_ops &&
-            dev->xfrmdev_ops->xdo_dev_state_update_curlft)
-                dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+            dev->xfrmdev_ops->xdo_dev_state_update_stats)
+                dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
 
 }
 #else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
 #endif
 void xfrm_state_insert(struct xfrm_state *x);
 int xfrm_state_add(struct xfrm_state *x);
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
         int err = 0;
 
         spin_lock(&x->lock);
-        xfrm_dev_state_update_curlft(x);
+        xfrm_dev_state_update_stats(x);
 
         if (x->km.state == XFRM_STATE_DEAD)
                 goto out;
@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
 
 int xfrm_state_check_expire(struct xfrm_state *x)
 {
-        xfrm_dev_state_update_curlft(x);
+        xfrm_dev_state_update_stats(x);
 
         if (!READ_ONCE(x->curlft.use_time))
                 WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
         memcpy(&p->sel, &x->sel, sizeof(p->sel));
         memcpy(&p->lft, &x->lft, sizeof(p->lft));
         if (x->xso.dev)
-                xfrm_dev_state_update_curlft(x);
+                xfrm_dev_state_update_stats(x);
         memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
         put_unaligned(x->stats.replay_window, &p->stats.replay_window);
         put_unaligned(x->stats.replay, &p->stats.replay);