Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-15 11:27:47 +00:00)
RDMA v6.13 first rc pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A lot of fixes accumulated over the holiday break:

  - Static tool fixes: a value already proven to be NULL, and a
    possible integer overflow

  - Many bnxt_re fixes:
      - Crashes due to a mismatch in the maximum SGE list size
      - Don't waste memory for user QPs by creating kernel-only
        structures
      - Fix compatibility issues with older HW in some of the new HW
        features recently introduced: RTS->RTS feature, work around 9096
      - Do not allow destroy_qp to fail
      - Validate QP MTU against device limits
      - Add missing validation on mandatory QP attributes for RTR->RTS
      - Report port_num in query_qp as required by the spec
      - Fix creation of QPs of the maximum queue size, and in the
        variable mode
      - Allow all QPs to be used on newer HW by limiting a work around
        only to the HW it affects
      - Use the correct MSN table size for variable mode QPs
      - Add missing locking in create_qp() accessing the qp_tbl
      - Form WQE buffers correctly when some of the buffers are 0 hop
      - Don't crash on QP destroy if the userspace doesn't set up the
        dip_ctx
      - Add the missing QP flush handler call on the DWQE path to avoid
        hanging on error recovery
      - Consistently use ENXIO for return codes if the device is
        fatally errored

  - Try again to fix VLAN support on iwarp; the previous fix was
    reverted because it broke other cards

  - Correct error path return code for rdma netlink events

  - Remove the separate net_device pointer in siw and rxe, which
    syzkaller found a way to UAF

  - Fix a UAF of a stack ib_sge in rtrs

  - Fix a regression where old mlx5 devices and FW were wrongly
    activating new device features and failing"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (28 commits)
  RDMA/mlx5: Enable multiplane mode only when it is supported
  RDMA/bnxt_re: Fix error recovery sequence
  RDMA/rtrs: Ensure 'ib_sge list' is accessible
  RDMA/rxe: Remove the direct link to net_device
  RDMA/hns: Fix missing flush CQE for DWQE
  RDMA/hns: Fix warning storm caused by invalid input in IO path
  RDMA/hns: Fix accessing invalid dip_ctx during destroying QP
  RDMA/hns: Fix mapping error of zero-hop WQE buffer
  RDMA/bnxt_re: Fix the locking while accessing the QP table
  RDMA/bnxt_re: Fix MSN table size for variable wqe mode
  RDMA/bnxt_re: Add send queue size check for variable wqe
  RDMA/bnxt_re: Disable use of reserved wqes
  RDMA/bnxt_re: Fix max_qp_wrs reported
  RDMA/siw: Remove direct link to net_device
  RDMA/nldev: Set error code in rdma_nl_notify_event
  RDMA/bnxt_re: Fix reporting hw_ver in query_device
  RDMA/bnxt_re: Fix to export port num to ib_query_qp
  RDMA/bnxt_re: Fix setting mandatory attributes for modify_qp
  RDMA/bnxt_re: Add check for path mtu in modify_qp
  RDMA/bnxt_re: Fix the check for 9060 condition
  ...
This commit is contained in: commit dea3165f98
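Note on the uverbs integer-overflow hunks below: they switch open-coded
multiplications of userspace-supplied sizes to the size_mul() helper from
include/linux/overflow.h, which saturates to SIZE_MAX instead of wrapping.
A minimal sketch of why that matters (illustrative only, not code from this
merge; the request_fits() helper is hypothetical):

	#include <linux/overflow.h>	/* size_mul() */

	/* A u32 product of two user-controlled values can wrap to a small
	 * number and slip past a range check. size_mul() saturates to
	 * SIZE_MAX on overflow, so the bounds check below reliably
	 * rejects an over-sized request.
	 */
	static bool request_fits(u32 wqe_size, u32 wr_count, size_t avail)
	{
		size_t len = size_mul(wqe_size, wr_count);

		return len <= avail;
	}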
@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
 	int bound_if_index = dev_addr->bound_dev_if;
 	int dev_type = dev_addr->dev_type;
 	struct net_device *ndev = NULL;
+	struct net_device *pdev = NULL;
 
 	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
 		goto out;
@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
 
 		rcu_read_lock();
 		ndev = rcu_dereference(sgid_attr->ndev);
+		if (ndev->ifindex != bound_if_index) {
+			pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
+			if (pdev) {
+				if (is_vlan_dev(pdev)) {
+					pdev = vlan_dev_real_dev(pdev);
+					if (ndev->ifindex == pdev->ifindex)
+						bound_if_index = pdev->ifindex;
+				}
+				if (is_vlan_dev(ndev)) {
+					pdev = vlan_dev_real_dev(ndev);
+					if (bound_if_index == pdev->ifindex)
+						bound_if_index = ndev->ifindex;
+				}
+			}
+		}
 		if (!net_eq(dev_net(ndev), dev_addr->net) ||
 		    ndev->ifindex != bound_if_index) {
 			rdma_put_gid_attr(sgid_attr);
@@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
 			 enum rdma_nl_notify_event_type type)
 {
 	struct sk_buff *skb;
+	int ret = -EMSGSIZE;
 	struct net *net;
-	int ret = 0;
 	void *nlh;
 
 	net = read_pnet(&device->coredev.rdma_net);
@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
 {
 	const void __user *res = iter->cur;
 
-	if (iter->cur + len > iter->end)
+	if (len > iter->end - iter->cur)
 		return (void __force __user *)ERR_PTR(-ENOSPC);
 	iter->cur += len;
 	return res;
@@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 	if (ret)
 		return ret;
-	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
+	wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
+						       cmd.wr_count));
 	if (IS_ERR(wqes))
 		return PTR_ERR(wqes);
-	sgls = uverbs_request_next_ptr(
-		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
+	sgls = uverbs_request_next_ptr(&iter,
+				       size_mul(cmd.sge_count,
+						sizeof(struct ib_uverbs_sge)));
 	if (IS_ERR(sgls))
 		return PTR_ERR(sgls);
 	ret = uverbs_request_finish(&iter);
@@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
 	if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
 		return ERR_PTR(-EINVAL);
 
-	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
+	wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
 	if (IS_ERR(wqes))
 		return ERR_CAST(wqes);
-	sgls = uverbs_request_next_ptr(
-		iter, sge_count * sizeof(struct ib_uverbs_sge));
+	sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
+						      sizeof(struct ib_uverbs_sge)));
 	if (IS_ERR(sgls))
 		return ERR_CAST(sgls);
 	ret = uverbs_request_finish(iter);
@@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
-	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
 	ib_attr->max_qp = dev_attr->max_qp;
 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
 	ib_attr->device_cap_flags =
@@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	unsigned int flags;
 	int rc;
 
+	bnxt_re_debug_rem_qpinfo(rdev, qp);
+
 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
 
 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
-	if (rc) {
+	if (rc)
 		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
-		return rc;
-	}
 
 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
 		flags = bnxt_re_lock_cqs(qp);
@@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 
 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
 
-	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
-		rc = bnxt_re_destroy_gsi_sqp(qp);
-		if (rc)
-			return rc;
-	}
+	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+		bnxt_re_destroy_gsi_sqp(qp);
 
 	mutex_lock(&rdev->qp_lock);
 	list_del(&qp->list);
@@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
 		atomic_dec(&rdev->stats.res.ud_qp_count);
 
-	bnxt_re_debug_rem_qpinfo(rdev, qp);
-
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
 
@@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 		}
 	}
 
-	if (qp_attr_mask & IB_QP_PATH_MTU) {
-		qp->qplib_qp.modify_flags |=
-				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
-		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
-		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
-	} else if (qp_attr->qp_state == IB_QPS_RTR) {
-		qp->qplib_qp.modify_flags |=
-			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
-		qp->qplib_qp.path_mtu =
-			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
-		qp->qplib_qp.mtu =
-			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+	if (qp_attr->qp_state == IB_QPS_RTR) {
+		enum ib_mtu qpmtu;
+
+		qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+		if (qp_attr_mask & IB_QP_PATH_MTU) {
+			if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+			    ib_mtu_enum_to_int(qpmtu))
+				return -EINVAL;
+			qpmtu = qp_attr->path_mtu;
+		}
+
+		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+		qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
 	}
 
 	if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+	qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
 	qp_attr->rq_psn = qplib_qp->rq.psn;
 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
 	qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2824,7 +2822,8 @@ bad:
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
-	bnxt_ud_qp_hw_stall_workaround(qp);
+	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+		bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 	return rc;
 }
@@ -2936,7 +2935,8 @@ bad:
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
-	bnxt_ud_qp_hw_stall_workaround(qp);
+	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+		bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 
 	return rc;
@@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+	return (u32)port_id + 1;
+}
+
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
@@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
 
 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
 {
-	int mask = IB_QP_STATE;
-	struct ib_qp_attr qp_attr;
 	struct bnxt_re_qp *qp;
 
-	qp_attr.qp_state = IB_QPS_ERR;
 	mutex_lock(&rdev->qp_lock);
 	list_for_each_entry(qp, &rdev->qp_list, list) {
 		/* Modify the state of all QPs except QP1/Shadow QP */
@@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
 			if (qp->qplib_qp.state !=
 			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
 			    qp->qplib_qp.state !=
-			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+			    CMDQ_MODIFY_QP_NEW_STATE_ERR)
 				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
 						       1, IB_EVENT_QP_FATAL);
-				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
-						  NULL);
-			}
 		}
 	}
 	mutex_unlock(&rdev->qp_lock);
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
 	if (rc)
 		return rc;
-
-	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
-			   GFP_KERNEL);
-	if (!srq->swq) {
-		rc = -ENOMEM;
-		goto fail;
-	}
 	srq->dbinfo.flags = 0;
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 				 CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 	spin_lock_init(&srq->lock);
 	srq->start_idx = 0;
 	srq->last_idx = srq->hwq.max_elements - 1;
-	for (idx = 0; idx < srq->hwq.max_elements; idx++)
-		srq->swq[idx].next_idx = idx + 1;
-	srq->swq[srq->last_idx].next_idx = -1;
+	if (!srq->hwq.is_user) {
+		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+				   GFP_KERNEL);
+		if (!srq->swq) {
+			rc = -ENOMEM;
+			goto fail;
+		}
+		for (idx = 0; idx < srq->hwq.max_elements; idx++)
+			srq->swq[idx].next_idx = idx + 1;
+		srq->swq[srq->last_idx].next_idx = -1;
+	}
 
 	srq->id = le32_to_cpu(resp.xid);
 	srq->dbinfo.hwq = &srq->hwq;
@@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	u32 tbl_indx;
 	u16 nsge;
 
-	if (res->dattr)
-		qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
-
+	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
 	sq->dbinfo.flags = 0;
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 				 CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		 : 0;
 	/* Update msn tbl size */
 	if (qp->is_host_msn_tbl && psn_sz) {
-		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+			hwq_attr.aux_depth =
+				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+		else
+			hwq_attr.aux_depth =
+				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
 		qp->msn_tbl_sz = hwq_attr.aux_depth;
 		qp->msn = 0;
 	}
@@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	if (rc)
 		return rc;
 
-	rc = bnxt_qplib_alloc_init_swq(sq);
-	if (rc)
-		goto fail_sq;
-
-	if (psn_sz)
-		bnxt_qplib_init_psn_ptr(qp, psn_sz);
+	if (!sq->hwq.is_user) {
+		rc = bnxt_qplib_alloc_init_swq(sq);
+		if (rc)
+			goto fail_sq;
+
+		if (psn_sz)
+			bnxt_qplib_init_psn_ptr(qp, psn_sz);
+	}
 
 	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
 	pbl = &sq->hwq.pbl[PBL_LVL_0];
 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
 		if (rc)
 			goto sq_swq;
-		rc = bnxt_qplib_alloc_init_swq(rq);
-		if (rc)
-			goto fail_rq;
+		if (!rq->hwq.is_user) {
+			rc = bnxt_qplib_alloc_init_swq(rq);
+			if (rc)
+				goto fail_rq;
+		}
 
 		req.rq_size = cpu_to_le32(rq->max_wqe);
 		pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		rq->dbinfo.db = qp->dpi->dbr;
 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
 	}
+	spin_lock_bh(&rcfw->tbl_lock);
 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
 	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
 	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+	spin_unlock_bh(&rcfw->tbl_lock);
 
 	return 0;
 fail:
@@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
 	}
 }
 
-static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+					  struct bnxt_qplib_qp *qp,
 					  struct cmdq_modify_qp *req)
 {
 	u32 mandatory_flags = 0;
@@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
 		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
 	}
 
+	if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+			mandatory_flags |=
+				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+	}
+
 	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
 	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
 		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
 		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
 		    is_optimized_state_transition(qp))
-			bnxt_set_mandatory_attributes(qp, &req);
+			bnxt_set_mandatory_attributes(res, qp, &req);
 	}
 	bmask = qp->modify_flags;
 	req.modify_mask = cpu_to_le32(qp->modify_flags);
@@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
 	memcpy(qp->smac, sb->src_mac, 6);
 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+	qp->port_id = le16_to_cpu(sb->port_id);
 bail:
 	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
 			  sbuf.sb, sbuf.dma_addr);
@@ -2667,10 +2686,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 			bnxt_qplib_add_flush_qp(qp);
 		} else {
 			/* Before we complete, do WA 9060 */
-			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
-				      cqe_sq_cons)) {
-				*lib_qp = qp;
-				goto out;
+			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+					      cqe_sq_cons)) {
+					*lib_qp = qp;
+					goto out;
+				}
 			}
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
 				cqe->status = CQ_REQ_STATUS_OK;
@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
 	u32 size;
 };
 
-#define BNXT_QPLIB_QP_MAX_SGL 6
 struct bnxt_qplib_swq {
 	u64 wr_id;
 	int next_idx;
@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
 #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
 #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
 #define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
-	struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
+	struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
 	int num_sge;
 	/* Max inline data is 96 bytes */
 	u32 inline_len;
@@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
 	u32 dest_qpn;
 	u8 smac[6];
 	u16 vlan_id;
+	u16 port_id;
 	u8 nw_type;
 	struct bnxt_qplib_ah ah;
 
@@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
 
 	/* Prevent posting if f/w is not in a state to process */
 	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
-		return bnxt_qplib_map_rc(opcode);
+		return -ENXIO;
 
 	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
 		return -ETIMEDOUT;
@@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 
 	rc = __send_message_basic_sanity(rcfw, msg, opcode);
 	if (rc)
-		return rc;
+		return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
 
 	rc = __send_message(rcfw, msg, opcode);
 	if (rc)
@@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
 	return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
 }
 
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+	return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
 static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
 {
 	return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
@@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_qp_init_rd_atom =
 		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
-	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
-	/*
-	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
-	 * reporting the max number
-	 */
-	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+		/*
+		 * 128 WQEs needs to be reserved for the HW (8916). Prevent
+		 * reporting the max number on legacy devices
+		 */
+		attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+	}
+
+	/* Adjust for max_qp_wqes for variable wqe */
+	if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+		attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
 
 	attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
 			    min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
@@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
 	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
 	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
 		CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+	#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
 	__le16	max_xp_qp_size;
 	__le16	create_qp_batch_size;
 	__le16	destroy_qp_batch_size;
@@ -931,6 +931,7 @@ struct hns_roce_hem_item {
 	size_t count; /* max ba numbers */
 	int start; /* start buf offset in this hem */
 	int end; /* end buf offset in this hem */
+	bool exist_bt;
 };
 
 /* All HEM items are linked in a tree structure */
@@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
 		}
 	}
 
+	hem->exist_bt = exist_bt;
 	hem->count = count;
 	hem->start = start;
 	hem->end = end;
@@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
 }
 
 static void hem_list_free_item(struct hns_roce_dev *hr_dev,
-			       struct hns_roce_hem_item *hem, bool exist_bt)
+			       struct hns_roce_hem_item *hem)
 {
-	if (exist_bt)
+	if (hem->exist_bt)
 		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
 				  hem->addr, hem->dma_addr);
 	kfree(hem);
 }
 
 static void hem_list_free_all(struct hns_roce_dev *hr_dev,
-			      struct list_head *head, bool exist_bt)
+			      struct list_head *head)
 {
 	struct hns_roce_hem_item *hem, *temp_hem;
 
 	list_for_each_entry_safe(hem, temp_hem, head, list) {
 		list_del(&hem->list);
-		hem_list_free_item(hr_dev, hem, exist_bt);
+		hem_list_free_item(hr_dev, hem);
 	}
 }
 
@@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
 
 	for (i = 0; i < region_cnt; i++) {
 		r = (struct hns_roce_buf_region *)&regions[i];
+		/* when r->hopnum = 0, the region should not occupy root_ba. */
+		if (!r->hopnum)
+			continue;
+
 		if (r->hopnum > 1) {
 			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
 			if (step > 0)
|
||||
|
||||
err_exit:
|
||||
for (level = 1; level < hopnum; level++)
|
||||
hem_list_free_all(hr_dev, &temp_list[level], true);
|
||||
hem_list_free_all(hr_dev, &temp_list[level]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
 {
 	struct hns_roce_hem_item *hem;
 
+	/* This is on the has_mtt branch, if r->hopnum
+	 * is 0, there is no root_ba to reuse for the
+	 * region's fake hem, so a dma_alloc request is
+	 * necessary here.
+	 */
 	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
-				  r->count, false);
+				  r->count, !r->hopnum);
 	if (!hem)
 		return -ENOMEM;
 
-	hem_list_assign_bt(hem, cpu_base, phy_base);
+	/* The root_ba can be reused only when r->hopnum > 0. */
+	if (r->hopnum)
+		hem_list_assign_bt(hem, cpu_base, phy_base);
 	list_add(&hem->list, branch_head);
 	list_add(&hem->sibling, leaf_head);
 
-	return r->count;
+	/* If r->hopnum == 0, 0 is returned,
+	 * so that the root_bt entry is not occupied.
+	 */
+	return r->hopnum ? r->count : 0;
 }
 
 static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
 		return -ENOMEM;
 
 	total = 0;
-	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
+	for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
 		r = &regions[i];
 		if (!r->count)
 			continue;
@@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
 			    region_cnt);
 	if (ret) {
 		for (i = 0; i < region_cnt; i++)
-			hem_list_free_all(hr_dev, &head.branch[i], false);
+			hem_list_free_all(hr_dev, &head.branch[i]);
 
-		hem_list_free_all(hr_dev, &head.root, true);
+		hem_list_free_all(hr_dev, &head.root);
 	}
 
 	return ret;
@@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
 
 	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
 		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
-			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
-					  j != 0);
+			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
 
-	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
+	hem_list_free_all(hr_dev, &hem_list->root_bt);
 	INIT_LIST_HEAD(&hem_list->btm_bt);
 	hem_list->root_ba = 0;
 }
@@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
 	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
 
 	ret = set_ud_opcode(ud_sq_wqe, wr);
-	if (WARN_ON(ret))
+	if (WARN_ON_ONCE(ret))
 		return ret;
 
 	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
 
 	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
-	if (WARN_ON(ret))
+	if (WARN_ON_ONCE(ret))
 		return ret;
 
 	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 #define HNS_ROCE_SL_SHIFT 2
 	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
 
+	if (unlikely(qp->state == IB_QPS_ERR)) {
+		flush_cqe(hr_dev, qp);
+		return;
+	}
 	/* All kinds of DirectWQE have the same header field layout */
 	hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
 	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_dip *hr_dip = hr_qp->dip;
 
+	if (!hr_dip)
+		return;
+
 	xa_lock(&hr_dev->qp_table.dip_xa);
 
 	hr_dip->qp_cnt--;
@@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
 	     mapped_cnt < page_cnt; i++) {
 		r = &mtr->hem_cfg.region[i];
-		/* if hopnum is 0, no need to map pages in this region */
-		if (!r->hopnum) {
-			mapped_cnt += r->count;
-			continue;
-		}
 
 		if (r->offset + r->count > page_cnt) {
 			ret = -EINVAL;
@@ -2839,7 +2839,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
 	int err;
 
 	*num_plane = 0;
-	if (!MLX5_CAP_GEN(mdev, ib_virt))
+	if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
 		return 0;
 
 	err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
@@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
 				    list) {
 			if (dev->sys_image_guid == mpi->sys_image_guid &&
-			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
+			    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
 				bound = mlx5_ib_bind_slave_port(dev, mpi);
 			}
 
@@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
 
 	mutex_lock(&mlx5_ib_multiport_mutex);
 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
-		if (dev->sys_image_guid == mpi->sys_image_guid)
+		if (dev->sys_image_guid == mpi->sys_image_guid &&
+		    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
 			bound = mlx5_ib_bind_slave_port(dev, mpi);
 
 		if (bound) {
@@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
 /* initialize rxe device parameters */
 static void rxe_init_device_param(struct rxe_dev *rxe)
 {
+	struct net_device *ndev;
+
 	rxe->max_inline_data = RXE_MAX_INLINE_DATA;
 
 	rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
 	rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
 	rxe->attr.max_pkeys = RXE_MAX_PKEYS;
 	rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return;
+
 	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
-			rxe->ndev->dev_addr);
+			ndev->dev_addr);
+
+	dev_put(ndev);
 
 	rxe->max_ucontext = RXE_MAX_UCONTEXT;
 }
@@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
 static void rxe_init_ports(struct rxe_dev *rxe)
 {
 	struct rxe_port *port = &rxe->port;
+	struct net_device *ndev;
 
 	rxe_init_port_param(port);
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return;
 	addrconf_addr_eui48((unsigned char *)&port->port_guid,
-			rxe->ndev->dev_addr);
+			ndev->dev_addr);
+	dev_put(ndev);
 	spin_lock_init(&port->port_lock);
 }
@@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
 /* called by ifc layer to create new rxe device.
  * The caller should allocate memory for rxe by calling ib_alloc_device.
  */
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+			struct net_device *ndev)
 {
 	rxe_init(rxe);
 	rxe_set_mtu(rxe, mtu);
 
-	return rxe_register_device(rxe, ibdev_name);
+	return rxe_register_device(rxe, ibdev_name, ndev);
 }
 
 static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
@@ -139,7 +139,8 @@ enum resp_states {
 
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
 
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+			struct net_device *ndev);
 
 void rxe_rcv(struct sk_buff *skb);
 
@@ -31,10 +31,19 @@
 static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
 {
 	unsigned char ll_addr[ETH_ALEN];
+	struct net_device *ndev;
+	int ret;
+
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return -ENODEV;
 
 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
 
-	return dev_mc_add(rxe->ndev, ll_addr);
+	ret = dev_mc_add(ndev, ll_addr);
+	dev_put(ndev);
+
+	return ret;
 }
 
 /**
@@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
 static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
 {
 	unsigned char ll_addr[ETH_ALEN];
+	struct net_device *ndev;
+	int ret;
+
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return -ENODEV;
 
 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
 
-	return dev_mc_del(rxe->ndev, ll_addr);
+	ret = dev_mc_del(ndev, ll_addr);
+	dev_put(ndev);
+
+	return ret;
 }
 
 /**
@@ -524,7 +524,16 @@ out:
  */
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
 {
-	return rxe->ndev->name;
+	struct net_device *ndev;
+	char *ndev_name;
+
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return NULL;
+	ndev_name = ndev->name;
+	dev_put(ndev);
+
+	return ndev_name;
 }
 
 int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
 	if (!rxe)
 		return -ENOMEM;
 
-	rxe->ndev = ndev;
 	ib_mark_name_assigned_by_user(&rxe->ib_dev);
 
-	err = rxe_add(rxe, ndev->mtu, ibdev_name);
+	err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
 	if (err) {
 		ib_dealloc_device(&rxe->ib_dev);
 		return err;
@@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
 
 void rxe_set_port_state(struct rxe_dev *rxe)
 {
-	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
+	struct net_device *ndev;
+
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return;
+
+	if (netif_running(ndev) && netif_carrier_ok(ndev))
 		rxe_port_up(rxe);
 	else
 		rxe_port_down(rxe);
+
+	dev_put(ndev);
 }
 
 static int rxe_notify(struct notifier_block *not_blk,
@@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
 			  u32 port_num, struct ib_port_attr *attr)
 {
 	struct rxe_dev *rxe = to_rdev(ibdev);
+	struct net_device *ndev;
 	int err, ret;
 
 	if (port_num != 1) {
@@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
 		goto err_out;
 	}
 
+	ndev = rxe_ib_device_get_netdev(ibdev);
+	if (!ndev) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
 	memcpy(attr, &rxe->port.attr, sizeof(*attr));
 
 	mutex_lock(&rxe->usdev_lock);
@@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
 
 	if (attr->state == IB_PORT_ACTIVE)
 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
-	else if (dev_get_flags(rxe->ndev) & IFF_UP)
+	else if (dev_get_flags(ndev) & IFF_UP)
 		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
 	else
 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 
 	mutex_unlock(&rxe->usdev_lock);
 
+	dev_put(ndev);
 	return ret;
 
 err_out:
@@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
 static int rxe_enable_driver(struct ib_device *ib_dev)
 {
 	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+	struct net_device *ndev;
+
+	ndev = rxe_ib_device_get_netdev(ib_dev);
+	if (!ndev)
+		return -ENODEV;
 
 	rxe_set_port_state(rxe);
-	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
+	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
+
+	dev_put(ndev);
 	return 0;
 }
 
@@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
 };
 
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+			struct net_device *ndev)
 {
 	int err;
 	struct ib_device *dev = &rxe->ib_dev;
@@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 	dev->num_comp_vectors = num_possible_cpus();
 	dev->local_dma_lkey = 0;
 	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
-			rxe->ndev->dev_addr);
+			ndev->dev_addr);
 
 	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
 				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
 
 	ib_set_device_ops(dev, &rxe_dev_ops);
-	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
+	err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
 	if (err)
 		return err;
 
@@ -370,6 +370,7 @@ struct rxe_port {
 	u32 qp_gsi_index;
 };
 
+#define RXE_PORT 1
 struct rxe_dev {
 	struct ib_device ib_dev;
 	struct ib_device_attr attr;
@@ -377,8 +378,6 @@ struct rxe_dev {
 	int max_inline_data;
 	struct mutex usdev_lock;
 
-	struct net_device *ndev;
-
 	struct rxe_pool uc_pool;
 	struct rxe_pool pd_pool;
 	struct rxe_pool ah_pool;
@@ -406,6 +405,11 @@ struct rxe_dev {
 	struct crypto_shash *tfm;
 };
 
+static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
+{
+	return ib_device_get_netdev(dev, RXE_PORT);
+}
+
 static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
 {
 	atomic64_inc(&rxe->stats_counters[index]);
@@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
 	return to_rpd(mw->ibmw.pd);
 }
 
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+						struct net_device *ndev);
 
 #endif /* RXE_VERBS_H */
@@ -46,6 +46,9 @@
  */
 #define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
 
+/* There is always only a port 1 per siw device */
+#define SIW_PORT 1
+
 struct siw_dev_cap {
 	int max_qp;
 	int max_qp_wr;
@@ -69,16 +72,12 @@ struct siw_pd {
 
 struct siw_device {
 	struct ib_device base_dev;
-	struct net_device *netdev;
 	struct siw_dev_cap attrs;
 
 	u32 vendor_part_id;
 	int numa_node;
 	char raw_gid[ETH_ALEN];
 
-	/* physical port state (only one port per device) */
-	enum ib_port_state state;
-
 	spinlock_t lock;
 
 	struct xarray qp_xa;
@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 {
 	struct socket *s;
 	struct siw_cep *cep = NULL;
+	struct net_device *ndev = NULL;
 	struct siw_device *sdev = to_siw_dev(id->device);
 	int addr_family = id->local_addr.ss_family;
 	int rv = 0;
@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
 
 		/* For wildcard addr, limit binding to current device only */
-		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+		if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
+			ndev = ib_device_get_netdev(id->device, SIW_PORT);
+			if (ndev) {
+				s->sk->sk_bound_dev_if = ndev->ifindex;
+			} else {
+				rv = -ENODEV;
+				goto error;
+			}
+		}
 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
 				  sizeof(struct sockaddr_in));
 	} else {
@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 		}
 
 		/* For wildcard addr, limit binding to current device only */
-		if (ipv6_addr_any(&laddr->sin6_addr))
-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+		if (ipv6_addr_any(&laddr->sin6_addr)) {
+			ndev = ib_device_get_netdev(id->device, SIW_PORT);
+			if (ndev) {
+				s->sk->sk_bound_dev_if = ndev->ifindex;
+			} else {
+				rv = -ENODEV;
+				goto error;
+			}
+		}
 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
 				  sizeof(struct sockaddr_in6));
 	}
@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 	}
 	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
 	cep->state = SIW_EPSTATE_LISTENING;
+	dev_put(ndev);
 
 	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
 
@@ -1879,6 +1893,7 @@ error:
 		siw_cep_set_free_and_put(cep);
 	}
 	sock_release(s);
+	dev_put(ndev);
 
 	return rv;
 }
@@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
 		return NULL;
 
 	base_dev = &sdev->base_dev;
-	sdev->netdev = netdev;
 
 	if (netdev->addr_len) {
 		memcpy(sdev->raw_gid, netdev->dev_addr,
@@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 
 	switch (event) {
 	case NETDEV_UP:
-		sdev->state = IB_PORT_ACTIVE;
 		siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
 		break;
 
 	case NETDEV_DOWN:
-		sdev->state = IB_PORT_DOWN;
 		siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
 		break;
 
@@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 		siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
 		break;
 	/*
-	 * Todo: Below netdev events are currently not handled.
+	 * All other events are not handled
 	 */
-	case NETDEV_CHANGEMTU:
-	case NETDEV_CHANGE:
-		break;
-
 	default:
 		break;
 	}
@@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
 	sdev = siw_device_create(netdev);
 	if (sdev) {
 		dev_dbg(&netdev->dev, "siw: new device\n");
-
-		if (netif_running(netdev) && netif_carrier_ok(netdev))
-			sdev->state = IB_PORT_ACTIVE;
-		else
-			sdev->state = IB_PORT_DOWN;
-
 		ib_mark_name_assigned_by_user(&sdev->base_dev);
 		rv = siw_device_register(sdev, basedev_name);
 		if (rv)
@@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr)
 {
-	struct siw_device *sdev = to_siw_dev(base_dev);
+	struct net_device *ndev;
 	int rv;
 
 	memset(attr, 0, sizeof(*attr));
 
 	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
 			      &attr->active_width);
+	if (rv)
+		return rv;
+
+	ndev = ib_device_get_netdev(base_dev, SIW_PORT);
+	if (!ndev)
+		return -ENODEV;
+
 	attr->gid_tbl_len = 1;
 	attr->max_msg_sz = -1;
-	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+	attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
+	attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+	attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
 		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
+	attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
+		IB_PORT_ACTIVE : IB_PORT_DOWN;
 	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
-	attr->state = sdev->state;
 	/*
 	 * All zero
 	 *
@@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
 	 * attr->subnet_timeout = 0;
 	 * attr->init_type_repy = 0;
 	 */
+	dev_put(ndev);
 	return rv;
 }
 
@@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
 	struct siw_qp *qp;
-	struct siw_device *sdev;
+	struct net_device *ndev;
 
-	if (base_qp && qp_attr && qp_init_attr) {
+	if (base_qp && qp_attr && qp_init_attr)
 		qp = to_siw_qp(base_qp);
-		sdev = to_siw_dev(base_qp->device);
-	} else {
+	else
 		return -EINVAL;
-	}
+
+	ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
+	if (!ndev)
+		return -ENODEV;
 
 	qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
 	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
 	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
 	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
 	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
 	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
-	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+	qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
 	qp_attr->max_rd_atomic = qp->attrs.irq_size;
 	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
 
@@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
 
 	qp_init_attr->cap = qp_attr->cap;
 
+	dev_put(ndev);
 	return 0;
 }
 
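The rxe and siw hunks above all convert a cached struct net_device pointer
into a per-call lookup. A rough sketch of the pattern (the
example_report_mtu() helper is hypothetical, only the API calls are real):
ib_device_get_netdev() returns the netdev with a reference held, so the
caller must dev_put() it when done, and a NULL return means the netdev is
already gone rather than a dangling pointer being dereferenced.

	static int example_report_mtu(struct ib_device *ibdev, u32 port,
				      int *mtu)
	{
		struct net_device *ndev;

		ndev = ib_device_get_netdev(ibdev, port); /* takes a ref */
		if (!ndev)
			return -ENODEV;

		*mtu = READ_ONCE(ndev->mtu);
		dev_put(ndev);	/* drop the reference when done */
		return 0;
	}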
@@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
 	struct rtrs_srv_mr *srv_mr;
 	bool need_inval = false;
 	enum ib_send_flags flags;
+	struct ib_sge list;
 	u32 imm;
 	int err;
 
@@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
 	imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
 	imm_wr.wr.next = NULL;
 	if (always_invalidate) {
-		struct ib_sge list;
 		struct rtrs_msg_rkey_rsp *msg;
 
 		srv_mr = &srv_path->mrs[id->msg_id];
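The rtrs hunk above hoists the ib_sge out of the always_invalidate branch
because the work request posted later still points at it; an sge declared
in an inner scope is dead stack by the time the send is posted. A hedged
illustration of the safe pattern (post_with_valid_sge() is a hypothetical
helper, not rtrs code):

	static int post_with_valid_sge(struct ib_qp *qp, u64 addr, u32 len,
				       u32 lkey)
	{
		/* function scope: the sge stays alive across the post */
		struct ib_sge sge = {
			.addr = addr,
			.length = len,
			.lkey = lkey,
		};
		struct ib_send_wr wr = {
			.sg_list = &sge,
			.num_sge = 1,
			.opcode = IB_WR_SEND,
		};
		const struct ib_send_wr *bad_wr;

		return ib_post_send(qp, &wr, &bad_wr);
	}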
@@ -1202,6 +1202,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
 	return dev->coredev_type == MLX5_COREDEV_VF;
 }
 
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+					       const struct mlx5_core_dev *dev2)
+{
+	return dev1->coredev_type == dev2->coredev_type;
+}
+
 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
 {
 	return dev->caps.embedded_cpu;
@@ -2119,7 +2119,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8	   migration_in_chunks[0x1];
 	u8	   reserved_at_d1[0x1];
 	u8	   sf_eq_usage[0x1];
-	u8	   reserved_at_d3[0xd];
+	u8	   reserved_at_d3[0x5];
+	u8	   multiplane[0x1];
+	u8	   reserved_at_d9[0x7];
 
 	u8	   cross_vhca_object_to_object_supported[0x20];
 