mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git

commit 709e151bfa
Merge branch 'for-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
         int bound_if_index = dev_addr->bound_dev_if;
         int dev_type = dev_addr->dev_type;
         struct net_device *ndev = NULL;
+        struct net_device *pdev = NULL;
 
         if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
                 goto out;
@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
 
         rcu_read_lock();
         ndev = rcu_dereference(sgid_attr->ndev);
+        if (ndev->ifindex != bound_if_index) {
+                pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
+                if (pdev) {
+                        if (is_vlan_dev(pdev)) {
+                                pdev = vlan_dev_real_dev(pdev);
+                                if (ndev->ifindex == pdev->ifindex)
+                                        bound_if_index = pdev->ifindex;
+                        }
+                        if (is_vlan_dev(ndev)) {
+                                pdev = vlan_dev_real_dev(ndev);
+                                if (bound_if_index == pdev->ifindex)
+                                        bound_if_index = ndev->ifindex;
+                        }
+                }
+        }
         if (!net_eq(dev_net(ndev), dev_addr->net) ||
             ndev->ifindex != bound_if_index) {
                 rdma_put_gid_attr(sgid_attr);
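The two hunks above are from cma_validate_port() (evidently drivers/infiniband/core/cma.c). They make GID-to-netdev validation tolerate a VLAN on either side of the binding: when the bound ifindex and the GID's netdev disagree, the bound index is normalized if one device is the VLAN upper of the other, so the unchanged net/ifindex check that follows can still match. Below is a minimal userspace model of that normalization; the struct layout and helper names are invented for illustration and are not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a net_device: an ifindex plus the ifindex of
 * the real (lower) device when this device is a VLAN upper, else 0. */
struct netdev {
        int ifindex;
        int real_ifindex;
};

static bool is_vlan(const struct netdev *d) { return d->real_ifindex != 0; }

/* Mirror of the diff's logic: if the GID's netdev (ndev) and the bound
 * ifindex disagree, accept the pair when one is the VLAN upper of the
 * other; pdev models the dev_get_by_index_rcu() lookup of bound_if_index. */
static int normalize_bound_if(const struct netdev *ndev, int bound_if_index,
                              const struct netdev *pdev)
{
        if (ndev->ifindex == bound_if_index || !pdev)
                return bound_if_index;
        if (is_vlan(pdev) && ndev->ifindex == pdev->real_ifindex)
                return pdev->real_ifindex;  /* bound dev is a VLAN on ndev */
        if (is_vlan(ndev) && bound_if_index == ndev->real_ifindex)
                return ndev->ifindex;       /* ndev is a VLAN on bound dev */
        return bound_if_index;
}

int main(void)
{
        struct netdev eth0  = { .ifindex = 2, .real_ifindex = 0 };
        struct netdev vlan0 = { .ifindex = 5, .real_ifindex = 2 };

        /* GID lives on eth0, CM id bound to vlan0: normalized to a match. */
        printf("normalized bound ifindex: %d\n",
               normalize_bound_if(&eth0, 5, &vlan0));
        return 0;
}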
@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
 {
         const void __user *res = iter->cur;
 
-        if (iter->cur + len > iter->end)
+        if (len > iter->end - iter->cur)
                 return (void __force __user *)ERR_PTR(-ENOSPC);
         iter->cur += len;
         return res;
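The rewritten check is the classic overflow-safe form: with a huge, user-controlled len, the pointer sum iter->cur + len can wrap around the address space (and out-of-range pointer arithmetic is undefined behavior in C), letting the old test pass spuriously. Comparing len against the remaining bytes iter->end - iter->cur cannot overflow, since cur <= end is an invariant of the iterator. A small userspace sketch of the same pattern, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the uverbs request iterator: cur advances from start to end. */
struct req_iter {
        const uint8_t *cur;
        const uint8_t *end;
};

/* Safe form, mirroring the fix: compare the requested length against the
 * bytes that remain; "end - cur" is a well-defined, non-negative ptrdiff. */
static bool request_has_room(const struct req_iter *it, size_t len)
{
        return len <= (size_t)(it->end - it->cur);
}

int main(void)
{
        uint8_t buf[64];
        struct req_iter it = { .cur = buf, .end = buf + sizeof(buf) };

        printf("len=16       -> %s\n", request_has_room(&it, 16) ? "ok" : "reject");
        printf("len=SIZE_MAX -> %s\n",
               request_has_room(&it, SIZE_MAX) ? "ok" : "reject");
        return 0;
}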
@@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
         ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
         if (ret)
                 return ret;
-        wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
+        wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
+                                                       cmd.wr_count));
         if (IS_ERR(wqes))
                 return PTR_ERR(wqes);
-        sgls = uverbs_request_next_ptr(
-                &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
+        sgls = uverbs_request_next_ptr(&iter,
+                                       size_mul(cmd.sge_count,
+                                                sizeof(struct ib_uverbs_sge)));
         if (IS_ERR(sgls))
                 return PTR_ERR(sgls);
         ret = uverbs_request_finish(&iter);
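These replacements route the user-controlled products through size_mul(), the kernel's saturating multiply from <linux/overflow.h>: the arithmetic is done in size_t, so a 32-bit product such as cmd.wqe_size * cmd.wr_count can no longer wrap to a small value, and if even size_t would overflow the result saturates to SIZE_MAX, which the bounds check in uverbs_request_next_ptr() then rejects. A userspace model, assuming the GCC/Clang __builtin_mul_overflow intrinsic:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's size_mul(): widen to size_t and
 * saturate to SIZE_MAX on overflow so a later length check must fail. */
static size_t size_mul_model(size_t a, size_t b)
{
        size_t r;

        if (__builtin_mul_overflow(a, b, &r))
                return SIZE_MAX;
        return r;
}

int main(void)
{
        /* Plausible hostile inputs: wqe_size * wr_count as 32-bit values. */
        uint32_t wqe_size = 0x10000, wr_count = 0x10000;

        /* The old u32 product wraps to 0 and would sail past validation... */
        printf("u32 product: %u\n", wqe_size * wr_count);
        /* ...the size_t product keeps the true value, which gets rejected. */
        printf("size_mul:    %zu\n", size_mul_model(wqe_size, wr_count));
        /* And a product too big even for size_t pins to SIZE_MAX. */
        printf("saturated:   %zu\n", size_mul_model(SIZE_MAX, 2));
        return 0;
}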
@@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
         if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
                 return ERR_PTR(-EINVAL);
 
-        wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
+        wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
         if (IS_ERR(wqes))
                 return ERR_CAST(wqes);
-        sgls = uverbs_request_next_ptr(
-                iter, sge_count * sizeof(struct ib_uverbs_sge));
+        sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
+                                                      sizeof(struct ib_uverbs_sge)));
         if (IS_ERR(sgls))
                 return ERR_CAST(sgls);
         ret = uverbs_request_finish(iter);
@@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 
         ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
         ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
-        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+        ib_attr->hw_ver = rdev->en_dev->pdev->revision;
         ib_attr->max_qp = dev_attr->max_qp;
         ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
         ib_attr->device_cap_flags =
@@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
         unsigned int flags;
         int rc;
 
+        bnxt_re_debug_rem_qpinfo(rdev, qp);
+
         bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
 
         rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
-        if (rc) {
+        if (rc)
                 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
-                return rc;
-        }
 
         if (rdma_is_kernel_res(&qp->ib_qp.res)) {
                 flags = bnxt_re_lock_cqs(qp);
@@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 
         bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
 
-        if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
-                rc = bnxt_re_destroy_gsi_sqp(qp);
-                if (rc)
-                        return rc;
-        }
+        if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+                bnxt_re_destroy_gsi_sqp(qp);
 
         mutex_lock(&rdev->qp_lock);
         list_del(&qp->list);
@@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
         else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
                 atomic_dec(&rdev->stats.res.ud_qp_count);
 
-        bnxt_re_debug_rem_qpinfo(rdev, qp);
-
         ib_umem_release(qp->rumem);
         ib_umem_release(qp->sumem);
 
@@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                 }
         }
 
-        if (qp_attr_mask & IB_QP_PATH_MTU) {
-                qp->qplib_qp.modify_flags |=
-                        CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
-                qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
-                qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
-        } else if (qp_attr->qp_state == IB_QPS_RTR) {
-                qp->qplib_qp.modify_flags |=
-                        CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
-                qp->qplib_qp.path_mtu =
-                        __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
-                qp->qplib_qp.mtu =
-                        ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+        if (qp_attr->qp_state == IB_QPS_RTR) {
+                enum ib_mtu qpmtu;
+
+                qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+                if (qp_attr_mask & IB_QP_PATH_MTU) {
+                        if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+                            ib_mtu_enum_to_int(qpmtu))
+                                return -EINVAL;
+                        qpmtu = qp_attr->path_mtu;
+                }
+
+                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+                qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+                qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
         }
 
         if (qp_attr_mask & IB_QP_TIMEOUT) {
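The reworked RTR branch derives the QP path MTU from the netdev (iboe_get_mtu()) and lets IB_QP_PATH_MTU only lower it; a request above what the link supports now fails with -EINVAL instead of being applied blindly. A minimal userspace model of that selection; the enum values mirror the verbs ib_mtu encoding, everything else here is illustrative:

#include <stdio.h>

/* Mirrors ib_mtu: IB_MTU_256 = 1 ... IB_MTU_4096 = 5. */
enum ib_mtu_model { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

/* Mirrors ib_mtu_enum_to_int(): 1 -> 256, ..., 5 -> 4096. */
static int mtu_to_int(enum ib_mtu_model m)
{
        return 128 << m;
}

/* Pick the path MTU for the RTR transition: start from the netdev-derived
 * MTU; a caller-supplied attribute may lower it but never raise it. */
static int select_path_mtu(enum ib_mtu_model netdev_mtu, int has_attr,
                           enum ib_mtu_model attr_mtu, enum ib_mtu_model *out)
{
        enum ib_mtu_model qpmtu = netdev_mtu;

        if (has_attr) {
                if (mtu_to_int(attr_mtu) > mtu_to_int(qpmtu))
                        return -22;     /* -EINVAL */
                qpmtu = attr_mtu;
        }
        *out = qpmtu;
        return 0;
}

int main(void)
{
        enum ib_mtu_model out;

        if (select_path_mtu(MTU_1024, 1, MTU_4096, &out))
                printf("4096 over a 1024 link: rejected\n");
        if (!select_path_mtu(MTU_4096, 1, MTU_1024, &out))
                printf("1024 over a 4096 link: ok, qpmtu=%d\n", mtu_to_int(out));
        return 0;
}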
@@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
         qp_attr->retry_cnt = qplib_qp->retry_cnt;
         qp_attr->rnr_retry = qplib_qp->rnr_retry;
         qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+        qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
         qp_attr->rq_psn = qplib_qp->rq.psn;
         qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
         qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2824,7 +2822,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
                 wr = wr->next;
         }
         bnxt_qplib_post_send_db(&qp->qplib_qp);
-        bnxt_ud_qp_hw_stall_workaround(qp);
+        if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+                bnxt_ud_qp_hw_stall_workaround(qp);
         spin_unlock_irqrestore(&qp->sq_lock, flags);
         return rc;
 }
@@ -2936,7 +2935,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
                 wr = wr->next;
         }
         bnxt_qplib_post_send_db(&qp->qplib_qp);
-        bnxt_ud_qp_hw_stall_workaround(qp);
+        if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+                bnxt_ud_qp_hw_stall_workaround(qp);
         spin_unlock_irqrestore(&qp->sq_lock, flags);
 
         return rc;
@@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+        return (u32)port_id + 1;
+}
 
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
         rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
         if (rc)
                 return rc;
-
-        srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
-                           GFP_KERNEL);
-        if (!srq->swq) {
-                rc = -ENOMEM;
-                goto fail;
-        }
         srq->dbinfo.flags = 0;
         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                  CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
         spin_lock_init(&srq->lock);
         srq->start_idx = 0;
         srq->last_idx = srq->hwq.max_elements - 1;
-        for (idx = 0; idx < srq->hwq.max_elements; idx++)
-                srq->swq[idx].next_idx = idx + 1;
-        srq->swq[srq->last_idx].next_idx = -1;
+        if (!srq->hwq.is_user) {
+                srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+                                   GFP_KERNEL);
+                if (!srq->swq) {
+                        rc = -ENOMEM;
+                        goto fail;
+                }
+                for (idx = 0; idx < srq->hwq.max_elements; idx++)
+                        srq->swq[idx].next_idx = idx + 1;
+                srq->swq[srq->last_idx].next_idx = -1;
+        }
 
         srq->id = le32_to_cpu(resp.xid);
         srq->dbinfo.hwq = &srq->hwq;
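Together with the previous hunk, this moves the software-queue allocation after the hardware queue setup and skips it entirely for user-space SRQs (srq->hwq.is_user), which never use the kernel-side bookkeeping. The swq array itself is an index-based free list: each entry's next_idx points at the next free slot and the tail holds -1. A userspace model of that chain, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the srq->swq bookkeeping above; not kernel API. */
struct swq_ent {
        unsigned long long wr_id;
        int next_idx;
};

struct soft_srq {
        struct swq_ent *swq;
        int max_elements;
        int start_idx;  /* head of the free chain */
        int last_idx;
};

static int srq_swq_init(struct soft_srq *srq, int max_elements)
{
        int idx;

        srq->max_elements = max_elements;
        srq->start_idx = 0;
        srq->last_idx = max_elements - 1;
        srq->swq = calloc(max_elements, sizeof(*srq->swq));
        if (!srq->swq)
                return -1;
        /* Chain each slot to its neighbor; the tail gets -1 (end of list). */
        for (idx = 0; idx < max_elements; idx++)
                srq->swq[idx].next_idx = idx + 1;
        srq->swq[srq->last_idx].next_idx = -1;
        return 0;
}

/* Pop the next free slot, as posting a receive WQE would. */
static int srq_swq_get(struct soft_srq *srq)
{
        int idx = srq->start_idx;

        if (idx != -1)
                srq->start_idx = srq->swq[idx].next_idx;
        return idx;
}

int main(void)
{
        struct soft_srq srq;

        if (srq_swq_init(&srq, 4))
                return 1;
        printf("slot: %d\n", srq_swq_get(&srq));
        printf("slot: %d\n", srq_swq_get(&srq));
        printf("slot: %d\n", srq_swq_get(&srq));
        free(srq.swq);
        return 0;
}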
@@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
         u32 tbl_indx;
         u16 nsge;
 
-        if (res->dattr)
-                qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
-
+        qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
         sq->dbinfo.flags = 0;
         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                  CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1044,13 +1043,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
         if (rc)
                 return rc;
 
-        rc = bnxt_qplib_alloc_init_swq(sq);
-        if (rc)
-                goto fail_sq;
-
-        if (psn_sz)
-                bnxt_qplib_init_psn_ptr(qp, psn_sz);
-
+        if (!sq->hwq.is_user) {
+                rc = bnxt_qplib_alloc_init_swq(sq);
+                if (rc)
+                        goto fail_sq;
+
+                if (psn_sz)
+                        bnxt_qplib_init_psn_ptr(qp, psn_sz);
+        }
         req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
         pbl = &sq->hwq.pbl[PBL_LVL_0];
         req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1076,9 +1076,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
         rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
         if (rc)
                 goto sq_swq;
-        rc = bnxt_qplib_alloc_init_swq(rq);
-        if (rc)
-                goto fail_rq;
+        if (!rq->hwq.is_user) {
+                rc = bnxt_qplib_alloc_init_swq(rq);
+                if (rc)
+                        goto fail_rq;
+        }
 
         req.rq_size = cpu_to_le32(rq->max_wqe);
         pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1283,7 +1285,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
         }
 }
 
-static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+                                          struct bnxt_qplib_qp *qp,
                                           struct cmdq_modify_qp *req)
 {
         u32 mandatory_flags = 0;
@@ -1298,6 +1301,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
                 mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
         }
 
+        if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+            (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+             qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+                if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+                        mandatory_flags |=
+                                CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+        }
+
         if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
             qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
                 mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
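The new block ORs the minimum-RNR-timer bit into the mandatory modify mask, but only when the device advertises the capability (via the _is_min_rnr_in_rtr_rts_mandatory() helper added further down) and the QP is an RC QP moving from RTR to RTS. A compact userspace model of that decision; all constants here are stand-ins, not the real CMDQ values:

#include <stdio.h>

/* Illustrative capability and mask bits (not the hardware encodings). */
#define CAP_MIN_RNR_RTR_RTS_OPT 0x1000u
#define MASK_QKEY               0x2u
#define MASK_MIN_RNR_TIMER      0x4u

enum qp_state { STATE_RTR, STATE_RTS };
enum qp_type  { TYPE_RC, TYPE_UD };

/* Decide which modify-mask bits are mandatory for this transition. */
static unsigned int mandatory_mask(unsigned int dev_cap_flags2,
                                   enum qp_type type,
                                   enum qp_state cur, enum qp_state next)
{
        unsigned int mask = 0;

        /* New rule from the diff: on RTR -> RTS, RC QPs must also carry
         * the min RNR timer when the device advertises the capability. */
        if ((dev_cap_flags2 & CAP_MIN_RNR_RTR_RTS_OPT) &&
            cur == STATE_RTR && next == STATE_RTS && type == TYPE_RC)
                mask |= MASK_MIN_RNR_TIMER;

        if (type == TYPE_UD)
                mask |= MASK_QKEY;
        return mask;
}

int main(void)
{
        printf("RC RTR->RTS mask: 0x%x\n",
               mandatory_mask(CAP_MIN_RNR_RTR_RTS_OPT, TYPE_RC,
                              STATE_RTR, STATE_RTS));
        return 0;
}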
@@ -1338,7 +1349,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
                 /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
                 if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
                     is_optimized_state_transition(qp))
-                        bnxt_set_mandatory_attributes(qp, &req);
+                        bnxt_set_mandatory_attributes(res, qp, &req);
         }
         bmask = qp->modify_flags;
         req.modify_mask = cpu_to_le32(qp->modify_flags);
@@ -1521,6 +1532,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
         qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
         memcpy(qp->smac, sb->src_mac, 6);
         qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+        qp->port_id = le16_to_cpu(sb->port_id);
 bail:
         dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
                           sbuf.sb, sbuf.dma_addr);
@@ -2667,10 +2679,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                         bnxt_qplib_add_flush_qp(qp);
                 } else {
                         /* Before we complete, do WA 9060 */
-                        if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
-                                      cqe_sq_cons)) {
-                                *lib_qp = qp;
-                                goto out;
+                        if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+                                if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+                                              cqe_sq_cons)) {
+                                        *lib_qp = qp;
+                                        goto out;
+                                }
                         }
                         if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
                                 cqe->status = CQ_REQ_STATUS_OK;
@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
         u32 size;
 };
 
-#define BNXT_QPLIB_QP_MAX_SGL 6
 struct bnxt_qplib_swq {
         u64 wr_id;
         int next_idx;
@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
 #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
 #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
 #define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
-        struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
+        struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
         int num_sge;
         /* Max inline data is 96 bytes */
         u32 inline_len;
@@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
         u32 dest_qpn;
         u8 smac[6];
         u16 vlan_id;
+        u16 port_id;
         u8 nw_type;
         struct bnxt_qplib_ah ah;
 
@@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
         return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
 }
 
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+        return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
 static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
 {
         return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
@@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
         #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
         #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
                 CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+        #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
         __le16 max_xp_qp_size;
         __le16 create_qp_batch_size;
         __le16 destroy_qp_batch_size;
@@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
                 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
                                     list) {
                         if (dev->sys_image_guid == mpi->sys_image_guid &&
-                            (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+                            (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
+                            mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
                                 bound = mlx5_ib_bind_slave_port(dev, mpi);
                         }
 
@@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
 
         mutex_lock(&mlx5_ib_multiport_mutex);
         list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
-                if (dev->sys_image_guid == mpi->sys_image_guid)
+                if (dev->sys_image_guid == mpi->sys_image_guid &&
+                    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
                         bound = mlx5_ib_bind_slave_port(dev, mpi);
 
                 if (bound) {
@@ -1202,6 +1202,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
         return dev->coredev_type == MLX5_COREDEV_VF;
 }
 
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+                                               const struct mlx5_core_dev *dev2)
+{
+        return dev1->coredev_type == dev2->coredev_type;
+}
+
 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
 {
         return dev->caps.embedded_cpu;