RDMA/mlx5: Extend ODP statistics with operation count

The current ODP counters represent the total number of pages
handled, but this alone is not enough to judge the effectiveness
of these operations.

Extend the ODP counters to include the number of times page fault
and invalidation events were handled.

Example for a single page fault handling 512 pages:
- page_faults: incremented by 512 (total pages)
- page_faults_handled: incremented by 1 (operation count)

The same applies to the page invalidation counters.

Previous output:
$ rdma stat mr
dev rocep8s0f0 mrn 8 page_faults 27 page_invalidations 0 page_prefetch 29

New output:
$ rdma stat mr
dev rocep8s0f0 mrn 21 page_faults 512 page_faults_handled 1
page_invalidations 0 page_invalidations_handled 0 page_prefetch 51200
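
Together, the two counters make the per-event batch size directly
computable: in the example output above, page_faults / page_faults_handled
= 512 / 1, i.e. this fault event resolved 512 pages, which is the
effectiveness signal the page-count-only counters could not provide.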

Signed-off-by: Chiara Meiohas <cmeiohas@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/b18f29ed1392996ade66e9e6c45f018925253f6a.1733234165.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Author: Chiara Meiohas <cmeiohas@nvidia.com>, 2024-12-03 15:57:11 +02:00; committed by Leon Romanovsky
commit fbef60de6c (parent f5afe060b1): 4 changed files with 20 additions and 3 deletions

@@ -669,6 +669,12 @@ struct mlx5_ib_mkey {
 #define mlx5_update_odp_stats(mr, counter_name, value) \
         atomic64_add(value, &((mr)->odp_stats.counter_name))
 
+#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
+        do { \
+                mlx5_update_odp_stats(mr, counter_name, value); \
+                atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
+        } while (0)
+
 struct mlx5_ib_mr {
         struct ib_mr ibmr;
         struct mlx5_ib_mkey mmkey;
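
The counter_name##_handled token pasting means a single counter argument
updates both members of a pair. As a rough standalone sketch of the same
pattern, using C11 atomics in place of the kernel's atomic64_t (the struct,
macro, and variable names below are illustrative, not the kernel's):

/* Illustrative userspace model of the paired-counter macro pattern;
 * not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct odp_counters_model {
        atomic_ullong faults;          /* total pages resolved by faults */
        atomic_ullong faults_handled;  /* number of fault events handled */
};

#define model_update(stats, name, value) \
        atomic_fetch_add(&(stats)->name, (value))

#define model_update_with_handled(stats, name, value) \
        do { \
                model_update(stats, name, value); \
                atomic_fetch_add(&(stats)->name##_handled, 1); \
        } while (0)

int main(void)
{
        struct odp_counters_model m = { 0 };

        /* One page-fault event that resolves 512 pages. */
        model_update_with_handled(&m, faults, 512);

        printf("faults=%llu faults_handled=%llu\n",
               (unsigned long long)atomic_load(&m.faults),
               (unsigned long long)atomic_load(&m.faults_handled));
        return 0;
}

Compiled as ordinary userspace C, this prints faults=512 faults_handled=1,
mirroring the commit-message example.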

@@ -313,7 +313,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
                 MLX5_IB_UPD_XLT_ZAP |
                 MLX5_IB_UPD_XLT_ATOMIC);
-        mlx5_update_odp_stats(mr, invalidations, invalidations);
+        mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
         /*
          * We are now sure that the device will not access the
@@ -997,7 +997,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
         if (ret < 0)
                 goto end;
-        mlx5_update_odp_stats(mr, faults, ret);
+        mlx5_update_odp_stats_with_handled(mr, faults, ret);
         npages += ret;
         ret = 0;
@@ -1529,7 +1529,7 @@ static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
                 goto err;
         }
-        mlx5_update_odp_stats(mr, faults, ret);
+        mlx5_update_odp_stats_with_handled(mr, faults, ret);
         mlx5r_deref_odp_mkey(mmkey);
         if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)

@@ -95,10 +95,19 @@ static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
         if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
                         atomic64_read(&mr->odp_stats.faults)))
                 goto err_table;
+        if (rdma_nl_stat_hwcounter_entry(
+                        msg, "page_faults_handled",
+                        atomic64_read(&mr->odp_stats.faults_handled)))
+                goto err_table;
         if (rdma_nl_stat_hwcounter_entry(
                         msg, "page_invalidations",
                         atomic64_read(&mr->odp_stats.invalidations)))
                 goto err_table;
+        if (rdma_nl_stat_hwcounter_entry(
+                        msg, "page_invalidations_handled",
+                        atomic64_read(&mr->odp_stats.invalidations_handled)))
+                goto err_table;
         if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
                         atomic64_read(&mr->odp_stats.prefetch)))
                 goto err_table;

@@ -2256,7 +2256,9 @@ struct rdma_netdev_alloc_params {
 struct ib_odp_counters {
         atomic64_t faults;
+        atomic64_t faults_handled;
         atomic64_t invalidations;
+        atomic64_t invalidations_handled;
         atomic64_t prefetch;
 };
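
As an illustration only, a hypothetical helper (not part of this patch)
sketches how a kernel-side consumer could combine a counter pair into an
average batch size per fault event; the same ratio can equally be computed
in userspace from the rdma stat mr output:

/* Hypothetical helper, not part of this patch: average number of pages
 * resolved per page-fault event, guarding against division by zero. */
static inline s64 ib_odp_avg_pages_per_fault(struct ib_odp_counters *odp)
{
        s64 handled = atomic64_read(&odp->faults_handled);

        return handled ? atomic64_read(&odp->faults) / handled : 0;
}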