mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-16 09:56:46 +00:00
net/mlx5e: SHAMPO, Change frag page setup order during allocation
Now that the UMR allocation has been simplified, it is no longer possible to have a leftover page from a previous call to mlx5e_build_shampo_hd_umr(). This patch simplifies the code by switching the order of operations: first take the frag page and then increment the index. This is more straightforward and it also paves the way for dropping the info array. Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com> Signed-off-by: Tariq Toukan <tariqt@nvidia.com> Link: https://patch.msgid.link/20241107194357.683732-11-tariqt@nvidia.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
1a4b588577
commit
4f56868b71
@@ -651,7 +651,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
|
||||
u16 pi, header_offset, err, wqe_bbs;
|
||||
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
|
||||
u16 page_index = shampo->curr_page_index;
|
||||
struct mlx5e_frag_page *frag_page;
|
||||
struct mlx5e_frag_page *frag_page = NULL;
|
||||
struct mlx5e_dma_info *dma_info;
|
||||
struct mlx5e_umr_wqe *umr_wqe;
|
||||
int headroom, i;
|
||||
@@ -663,16 +663,14 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
|
||||
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
|
||||
build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
|
||||
|
||||
frag_page = &shampo->pages[page_index];
|
||||
|
||||
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
|
||||
for (i = 0; i < ksm_entries; i++, index++) {
|
||||
dma_info = &shampo->info[index];
|
||||
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
|
||||
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
|
||||
if (!(header_offset & (PAGE_SIZE - 1))) {
|
||||
page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
|
||||
frag_page = &shampo->pages[page_index];
|
||||
page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
|
||||
|
||||
err = mlx5e_page_alloc_fragmented(rq, frag_page);
|
||||
if (unlikely(err))
|
||||
|
Loading…
x
Reference in New Issue
Block a user