Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
IB/mlx5: Add function to read WQE from user-space
Add a helper function mlx5_ib_read_user_wqe to read information from user-space owned work queues. The function will be used in a later patch by the page-fault handling code in mlx5_ib.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
[ Add stub for ib_umem_copy_from() for CONFIG_INFINIBAND_USER_MEM=n - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent c5d76f130b
commit c1395a2a8c
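Before the diff itself, here is a minimal, hypothetical caller sketch (not part of this commit) showing how the later page-fault handling code might use the new helper; the function name example_read_faulting_wqe and the 256-byte buffer are illustrative assumptions only.

/*
 * Hypothetical caller sketch, not from this commit: copy the faulting
 * send WQE into a local buffer.  Per the helper's documentation it may
 * copy more than 'length' bytes (up to one full WQE), so the buffer
 * must be sized for the largest WQE in use; 256 is an assumed value.
 */
static int example_read_faulting_wqe(struct mlx5_ib_qp *qp, int wqe_index)
{
	char buf[256];
	int nbytes;

	/* send == 1: read from the send queue; wqe_index is in BB units */
	nbytes = mlx5_ib_read_user_wqe(qp, 1, wqe_index, buf, sizeof(buf));
	if (nbytes < 0)
		return nbytes;		/* e.g. -EINVAL */

	/* buf now holds at least one complete WQE; nbytes bytes are valid */
	return 0;
}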
@@ -503,6 +503,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		      struct ib_recv_wr **bad_wr);
 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+			  void *buffer, u32 length);
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 				int vector, struct ib_ucontext *context,
 				struct ib_udata *udata);
@@ -101,6 +101,77 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
 	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
 }
 
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ *	  otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ *	       wqe_index is in units of MLX5_SEND_WQE_BB.
+ *	       For receive work queue, it is the number of work queue
+ *	       element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or an error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+			  void *buffer, u32 length)
+{
+	struct ib_device *ibdev = qp->ibqp.device;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+	size_t offset;
+	size_t wq_end;
+	struct ib_umem *umem = qp->umem;
+	u32 first_copy_length;
+	int wqe_length;
+	int ret;
+
+	if (wq->wqe_cnt == 0) {
+		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+			    qp->ibqp.qp_type);
+		return -EINVAL;
+	}
+
+	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+		return -EINVAL;
+
+	if (offset > umem->length ||
+	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+		return -EINVAL;
+
+	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+	if (ret)
+		return ret;
+
+	if (send) {
+		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
+		wqe_length = ds * MLX5_WQE_DS_UNITS;
+	} else {
+		wqe_length = 1 << wq->wqe_shift;
+	}
+
+	if (wqe_length <= first_copy_length)
+		return first_copy_length;
+
+	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+				wqe_length - first_copy_length);
+	if (ret)
+		return ret;
+
+	return wqe_length;
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
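To make the two-stage copy above concrete, here is a small worked example under assumed queue geometry (the numbers are illustrative, not taken from the driver):

/*
 * Worked example with assumed values:
 *   wq->offset    = 0
 *   wq->wqe_shift = 6           (64-byte stride)
 *   wq->wqe_cnt   = 16          -> wq_end = 16 << 6 = 1024
 *   wqe_index     = 15, length  = 256
 *
 *   offset            = (15 % 16) << 6               = 960
 *   first_copy_length = min(960 + 256, 1024) - 960   = 64
 *
 * If the control segment then reports ds = 8,
 *   wqe_length = 8 * MLX5_WQE_DS_UNITS = 128 > 64,
 * so the remaining 64 bytes are copied from wq->offset, i.e. the WQE
 * wraps around the end of the ring back to the start of the queue.
 */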
@@ -189,6 +189,9 @@ struct mlx5_wqe_ctrl_seg {
 	__be32			imm;
 };
 
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_DS_UNITS 16
+
 struct mlx5_wqe_xrc_seg {
 	__be32			xrc_srqn;
 	u8			rsvd[12];
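A minimal sketch (an assumption, not code from this commit) of how these two constants combine, mirroring the send path above: the low six bits of ctrl->qpn_ds give the WQE size in 16-byte units.

/* Illustrative helper, not part of this commit: send WQE length in bytes. */
static inline int example_wqe_length(const struct mlx5_wqe_ctrl_seg *ctrl)
{
	int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

	return ds * MLX5_WQE_DS_UNITS;	/* e.g. ds == 4 -> 64 bytes */
}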
@@ -98,7 +98,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
 static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
+static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+				    size_t length) {
+	return -EINVAL;
+}
 #endif /* CONFIG_INFINIBAND_USER_MEM */
 
 #endif /* IB_UMEM_H */