mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-10 07:50:04 +00:00
RDMA/mlx5: Use query_special_contexts for mkeys
Use query_special_contexts to get the correct value of mkeys such as null_mkey, terminate_scatter_list_mkey and dump_fill_mkey, as FW will change them in certain configurations. Link: https://lore.kernel.org/r/000236f0a9487d48809f87bcc3620a3964b2d3d3.1673960981.git.leon@kernel.org Signed-off-by: Or Har-Toov <ohartoov@nvidia.com> Reviewed-by: Michael Guralnik <michaelgur@nvidia.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
1b1e486883
commit
594cac11ab
@ -5,34 +5,41 @@
|
||||
|
||||
#include "cmd.h"
|
||||
|
||||
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
|
||||
int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
|
||||
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
|
||||
bool is_terminate, is_dump, is_null;
|
||||
int err;
|
||||
|
||||
is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey);
|
||||
is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey);
|
||||
is_null = MLX5_CAP_GEN(dev->mdev, null_mkey);
|
||||
|
||||
dev->mkeys.terminate_scatter_list_mkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
|
||||
if (!is_terminate && !is_dump && !is_null)
|
||||
return 0;
|
||||
|
||||
MLX5_SET(query_special_contexts_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
|
||||
err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
|
||||
if (!err)
|
||||
*mkey = MLX5_GET(query_special_contexts_out, out,
|
||||
dump_fill_mkey);
|
||||
return err;
|
||||
}
|
||||
err = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
|
||||
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
|
||||
int err;
|
||||
if (is_dump)
|
||||
dev->mkeys.dump_fill_mkey = MLX5_GET(query_special_contexts_out,
|
||||
out, dump_fill_mkey);
|
||||
|
||||
MLX5_SET(query_special_contexts_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
|
||||
err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
|
||||
if (!err)
|
||||
*null_mkey = MLX5_GET(query_special_contexts_out, out,
|
||||
null_mkey);
|
||||
return err;
|
||||
if (is_null)
|
||||
dev->mkeys.null_mkey = cpu_to_be32(
|
||||
MLX5_GET(query_special_contexts_out, out, null_mkey));
|
||||
|
||||
if (is_terminate)
|
||||
dev->mkeys.terminate_scatter_list_mkey =
|
||||
cpu_to_be32(MLX5_GET(query_special_contexts_out, out,
|
||||
terminate_scatter_list_mkey));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
|
||||
|
@ -37,8 +37,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mlx5/driver.h>
|
||||
|
||||
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
|
||||
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
|
||||
int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev);
|
||||
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
|
||||
void *out);
|
||||
int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
|
||||
|
@ -1756,13 +1756,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
|
||||
struct mlx5_ib_dev *dev = to_mdev(ibdev);
|
||||
struct mlx5_ib_ucontext *context = to_mucontext(uctx);
|
||||
struct mlx5_bfreg_info *bfregi = &context->bfregi;
|
||||
int err;
|
||||
|
||||
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
|
||||
err = mlx5_cmd_dump_fill_mkey(dev->mdev,
|
||||
&resp->dump_fill_mkey);
|
||||
if (err)
|
||||
return err;
|
||||
resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
|
||||
resp->comp_mask |=
|
||||
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
|
||||
}
|
||||
@ -3666,6 +3662,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
|
||||
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
|
||||
}
|
||||
|
||||
err = mlx5r_cmd_query_special_mkeys(dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5_ib_init_multiport_master(dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -1054,6 +1054,13 @@ struct mlx5_port_caps {
|
||||
u8 ext_port_cap;
|
||||
};
|
||||
|
||||
|
||||
struct mlx5_special_mkeys {
|
||||
u32 dump_fill_mkey;
|
||||
__be32 null_mkey;
|
||||
__be32 terminate_scatter_list_mkey;
|
||||
};
|
||||
|
||||
struct mlx5_ib_dev {
|
||||
struct ib_device ib_dev;
|
||||
struct mlx5_core_dev *mdev;
|
||||
@ -1084,7 +1091,6 @@ struct mlx5_ib_dev {
|
||||
|
||||
struct xarray odp_mkeys;
|
||||
|
||||
u32 null_mkey;
|
||||
struct mlx5_ib_flow_db *flow_db;
|
||||
/* protect resources needed as part of reset flow */
|
||||
spinlock_t reset_flow_resource_lock;
|
||||
@ -1113,6 +1119,7 @@ struct mlx5_ib_dev {
|
||||
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
|
||||
u16 pkey_table_len;
|
||||
u8 lag_ports;
|
||||
struct mlx5_special_mkeys mkeys;
|
||||
};
|
||||
|
||||
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
|
||||
|
@ -104,7 +104,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
|
||||
if (flags & MLX5_IB_UPD_XLT_ZAP) {
|
||||
for (; pklm != end; pklm++, idx++) {
|
||||
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
|
||||
pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
|
||||
pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
|
||||
pklm->va = 0;
|
||||
}
|
||||
return;
|
||||
@ -137,7 +137,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
|
||||
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
|
||||
pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
|
||||
} else {
|
||||
pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
|
||||
pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
|
||||
pklm->va = 0;
|
||||
}
|
||||
}
|
||||
@ -1015,7 +1015,8 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
|
||||
|
||||
/* receive WQE end of sg list. */
|
||||
if (receive_queue && bcnt == 0 &&
|
||||
key == MLX5_TERMINATE_SCATTER_LIST_LKEY && io_virt == 0)
|
||||
key == dev->mkeys.terminate_scatter_list_mkey &&
|
||||
io_virt == 0)
|
||||
break;
|
||||
|
||||
if (!inline_segment && total_wqe_bytes) {
|
||||
@ -1615,25 +1616,15 @@ static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
|
||||
|
||||
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
internal_fill_odp_caps(dev);
|
||||
|
||||
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
|
||||
return ret;
|
||||
return 0;
|
||||
|
||||
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
|
||||
|
||||
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
|
||||
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
|
||||
if (ret) {
|
||||
mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_init(&dev->odp_eq_mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
|
||||
|
@ -447,7 +447,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
|
||||
|
||||
if (i < srq->msrq.max_avail_gather) {
|
||||
scat[i].byte_count = 0;
|
||||
scat[i].lkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
|
||||
scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
|
||||
scat[i].addr = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1252,7 +1252,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
|
||||
|
||||
if (i < qp->rq.max_gs) {
|
||||
scat[i].byte_count = 0;
|
||||
scat[i].lkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
|
||||
scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
|
||||
scat[i].addr = 0;
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user