RDMA/mlx5: Change check for cacheable mkeys
umem can be NULL for user application mkeys in some cases, so umem
can't be used to check whether an mkey is cacheable. Check an explicit
flag that indicates cacheability instead. Also make sure that all
mkeys which are not returned to the cache are destroyed.
Fixes: dd1b913fb0 ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Link: https://lore.kernel.org/r/2690bc5c6896bcb937f89af16a1ff0343a7ab3d0.1712140377.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 8c1185fef6 (parent 0611a8e8b4)
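In short: the dereg path stops inferring cacheability from mr->umem (which may be NULL for user mkeys) and instead tests the new mmkey.cacheable bit, funneling the revoke-or-destroy decision through one helper. Below is a minimal userspace sketch of the behavioral difference; the struct and helper names are stand-ins for illustration, not the kernel API.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for the mkey state relevant here; not the kernel structs. */
	struct fake_mr {
		void *umem;     /* may be NULL for user application mkeys */
		bool cacheable; /* the new explicit bit, set at creation   */
	};

	/* Old behavior: cacheability was inferred from umem, so a cacheable
	 * mkey with a NULL umem was never offered back to the cache. */
	static bool old_try_cache(const struct fake_mr *mr)
	{
		return mr->umem != NULL;
	}

	/* New behavior (as in mlx5_revoke_mr): the explicit flag decides;
	 * any mkey not returned to the cache is destroyed instead. */
	static bool new_try_cache(const struct fake_mr *mr)
	{
		return mr->cacheable;
	}

	int main(void)
	{
		/* A cacheable user mkey whose umem happens to be NULL. */
		struct fake_mr mr = { .umem = NULL, .cacheable = true };

		printf("old check: %s\n", old_try_cache(&mr) ? "cache" : "destroy");
		printf("new check: %s\n", new_try_cache(&mr) ? "cache" : "destroy");
		return 0;
	}

The kernel change itself lives in mlx5_revoke_mr() in the diff below, which also guarantees destroy_mkey() runs for any mkey that fails revoke or cache insertion.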
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -646,6 +646,7 @@ struct mlx5_ib_mkey {
 	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
 	struct mlx5r_cache_rb_key rb_key;
 	struct mlx5_cache_ent *cache_ent;
+	u8 cacheable : 1;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	if (IS_ERR(mr))
 		return mr;
 	mr->mmkey.rb_key = rb_key;
+	mr->mmkey.cacheable = true;
 	return mr;
 }
 
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
 	mr->ibmr.pd = pd;
 	mr->umem = umem;
 	mr->page_shift = order_base_2(page_size);
+	mr->mmkey.cacheable = true;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
 	return mr;
@@ -1835,6 +1837,23 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	return ret;
 }
 
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+		return 0;
+
+	if (ent) {
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		ent->in_use--;
+		mr->mmkey.cache_ent = NULL;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
+	}
+	return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1899,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	}
 
 	/* Stop DMA */
-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-		if (mlx5r_umr_revoke_mr(mr) ||
-		    cache_ent_find_and_store(dev, mr))
-			mr->mmkey.cache_ent = NULL;
-
-	if (!mr->mmkey.cache_ent) {
-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-		if (rc)
-			return rc;
-	}
+	rc = mlx5_revoke_mr(mr);
+	if (rc)
+		return rc;
 
 	if (mr->umem) {
 		bool is_odp = is_odp_mr(mr);