Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
RDMA v5.16 fourth rc pull request

- Revert the patch fixing the DM related crash causing a widespread
  regression for kernel ULPs. A proper fix just didn't appear this cycle
  due to the holidays
- Missing NULL check on alloc in uverbs
- Double free in rxe error paths
- Fix a new kernel-infoleak report when forming ah_attr's without GRHs
  in ucma

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmHXiq8ACgkQOG33FX4g
mxqGYQ//XvZDj6pvhyepeEX3FcLG4mlpeFMAR+GQg1K4HHQgtaFjWv96j9aku4/+
G0uit4J4U4fVVDCKxIwuuYrOh9KK2r8JIpcbbsPMYb0KQyvBh/ugXta4lVQYzo7o
h5qiNEmdRx2ugKzMImwRS3HEt7XAIoaysmXlm5FskOP7AYlYew8hS7P29NnnD3BO
ixysSsvkZXX4N+geBw1YEHZ03W2/7DXRlXXAU4m8lh1ktKIBRwZwmyo2W7AJlTtd
aJOzZ85zzrkwhwaUhXxCJuKXsCOP774l1TPjbOv0aenEjeLGNBHuxbLcphBpwJ3A
JASx/VbDZzVZiRwL5TTpxuWVvBbxJdN8TU8QhOJqlnYMPf2IV8q7S/2qTkFm5Dnb
miaFYVkXWr8MV3Bq4yAvRWBx3Cues5FBZ7Te9lIp8lJsddrweMw00OVj8HKrJU2Q
gHVgBLfrPFkpohFe+7nSR4p9m47ssRy+/Ey5yPvkK21tePLlQi0lpCLpbioDA47O
cOI4y0OSHm4QZIKYWcy3ux3F6RoCzbl1Smg0Yma4+UO60IisCyS/OtEU6R6zi/D7
whplbKIhsDc0//tuuKOqdiVjyTqU4WQ3CXr3uSDClzXjfCnCCJpIHytIBKn0Z8Ow
4IqY0iY7mFzxf6DbzRGSNUF4BERALUVGpSQkRiGwikEkWU89Ou8=
=A1/J
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Last pull for 5.16. The reversion has been known for a while now but
  didn't get a proper fix in time. Looks like we will have several
  info-leak bugs to take care of going forward.

  - Revert the patch fixing the DM related crash causing a widespread
    regression for kernel ULPs. A proper fix just didn't appear this
    cycle due to the holidays

  - Missing NULL check on alloc in uverbs

  - Double free in rxe error paths

  - Fix a new kernel-infoleak report when forming ah_attr's without
    GRHs in ucma"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Don't infoleak GRH fields
  RDMA/uverbs: Check for null return of kmalloc_array
  Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
  RDMA/rxe: Prevent double freeing rxe_map_set()
commit ddec8ed2d4
drivers/infiniband/core/uverbs_marshall.c
@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
         struct rdma_ah_attr *src = ah_attr;
         struct rdma_ah_attr conv_ah;
 
-        memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
+        memset(&dst->grh, 0, sizeof(dst->grh));
 
         if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
             (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
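The hunk above widens the memset() from dst->grh.reserved to all of dst->grh, so an ah_attr copied to userspace without a GRH no longer carries uninitialized kernel stack bytes in the GRH fields that are never filled in. A minimal userspace sketch of the same zero-before-selectively-filling pattern; the struct and field names below are hypothetical stand-ins, not the real uAPI types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical reply layout: the GRH-like fields are only meaningful
 * when a GRH is present. */
struct demo_ah_attr {
        uint8_t  dgid[16];      /* valid only with a GRH */
        uint32_t flow_label;    /* valid only with a GRH */
        uint8_t  hop_limit;     /* valid only with a GRH */
        uint8_t  reserved;
        uint16_t dlid;
        uint8_t  sl;
};

static void fill_reply(struct demo_ah_attr *dst, int have_grh)
{
        /* Zero the whole destination first: any field (or padding) not
         * explicitly set below must not leak stale stack contents. */
        memset(dst, 0, sizeof(*dst));

        dst->dlid = 0x1234;
        dst->sl = 1;
        if (have_grh) {
                dst->flow_label = 7;
                dst->hop_limit = 64;
                /* dgid would be copied here */
        }
}

int main(void)
{
        struct demo_ah_attr reply;      /* uninitialized stack memory */

        fill_reply(&reply, 0);          /* no GRH: those fields read back as 0 */
        printf("dlid=%u flow_label=%u\n",
               (unsigned)reply.dlid, (unsigned)reply.flow_label);
        return 0;
}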
drivers/infiniband/core/uverbs_uapi.c
@@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
         uapi->num_write_ex = max_write_ex + 1;
         data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
                              sizeof(*uapi->write_methods), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
         for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
                 data[i] = &uapi->notsupp_method;
         uapi->write_methods = data;
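The hunk above adds the missing NULL check after kmalloc_array(), so a failed allocation makes uapi_finalize() return -ENOMEM instead of dereferencing NULL in the loop that follows. A small self-contained sketch of the same shape with hypothetical names; calloc() stands in for kmalloc_array() here, and like it refuses a count * size overflow:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical method-table setup mirroring the pattern in the hunk. */
static int build_method_table(void ***table_out, size_t num_write,
                              size_t num_write_ex)
{
        static int notsupp_method;      /* stand-in default handler */
        size_t n = num_write + num_write_ex;
        void **data;

        data = calloc(n, sizeof(*data));
        if (!data)                      /* the check the fix adds */
                return -ENOMEM;

        for (size_t i = 0; i != n; i++)
                data[i] = &notsupp_method;

        *table_out = data;
        return 0;
}

int main(void)
{
        void **table;
        int ret = build_method_table(&table, 64, 8);

        if (ret) {
                fprintf(stderr, "build_method_table: %d\n", ret);
                return 1;
        }
        printf("slot 0 -> %p\n", table[0]);
        free(table);
        return 0;
}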
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -664,6 +664,7 @@ struct mlx5_ib_mr {
 
         /* User MR data */
         struct mlx5_cache_ent *cache_ent;
+        struct ib_umem *umem;
 
         /* This is zero'd when the MR is allocated */
         union {
@@ -675,7 +676,7 @@ struct mlx5_ib_mr {
                 struct list_head list;
         };
 
-        /* Used only by kernel MRs */
+        /* Used only by kernel MRs (umem == NULL) */
         struct {
                 void *descs;
                 void *descs_alloc;
@@ -696,9 +697,8 @@ struct mlx5_ib_mr {
                 int data_length;
         };
 
-        /* Used only by User MRs */
+        /* Used only by User MRs (umem != NULL) */
         struct {
-                struct ib_umem *umem;
                 unsigned int page_shift;
                 /* Current access_flags */
                 int access_flags;
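These header hunks restore struct ib_umem *umem as a field shared by all MR kinds, next to cache_ent, and re-annotate which anonymous struct is used by kernel MRs (umem == NULL) versus user MRs (umem != NULL). Placement matters because the '/* This is zero'd when the MR is allocated */' comment marks a region that is cleared wholesale when an MR object is (re)used, so whether a field sits inside or outside that region changes its lifetime. A self-contained sketch of that clear-from-a-marker idiom, using a made-up struct rather than the real mlx5 layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical object: the head survives reuse, the tail is wiped. */
struct demo_mr {
        int   id;               /* persistent across reuse */
        void *cache_ent;        /* persistent across reuse */

        /* everything from 'descs' to the end is cleared on reuse */
        void *descs;            /* kernel-MR only state */
        void *umem;             /* user-MR only state */
};

static void reuse_mr(struct demo_mr *mr)
{
        /* Clear only the volatile tail, preserving the persistent head. */
        memset(&mr->descs, 0,
               sizeof(*mr) - offsetof(struct demo_mr, descs));
}

int main(void)
{
        struct demo_mr mr = { .id = 42 };

        mr.descs = &mr;
        mr.umem = &mr;
        reuse_mr(&mr);
        printf("id=%d descs=%p umem=%p\n", mr.id, mr.descs, mr.umem);
        return 0;
}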
drivers/infiniband/hw/mlx5/mr.c
@@ -1904,18 +1904,19 @@ err:
         return ret;
 }
 
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-        int size = mr->max_descs * mr->desc_size;
+        if (!mr->umem && mr->descs) {
+                struct ib_device *device = mr->ibmr.device;
+                int size = mr->max_descs * mr->desc_size;
+                struct mlx5_ib_dev *dev = to_mdev(device);
 
-        if (!mr->descs)
-                return;
-
-        dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-                         DMA_TO_DEVICE);
-        kfree(mr->descs_alloc);
-        mr->descs = NULL;
+                dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+                                 DMA_TO_DEVICE);
+                kfree(mr->descs_alloc);
+                mr->descs = NULL;
+        }
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1991,8 +1992,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
         if (mr->cache_ent) {
                 mlx5_mr_cache_free(dev, mr);
         } else {
-                if (!udata)
-                        mlx5_free_priv_descs(mr);
+                mlx5_free_priv_descs(mr);
                 kfree(mr);
         }
         return 0;
@@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
         if (err)
                 goto err_free_in;
 
+        mr->umem = NULL;
         kfree(in);
 
         return mr;
@@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
         }
 
         mr->ibmr.device = pd->device;
+        mr->umem = NULL;
 
         switch (mr_type) {
         case IB_MR_TYPE_MEM_REG:
drivers/infiniband/sw/rxe/rxe_mr.c
@@ -135,19 +135,19 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
 
         ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
         if (ret)
-                goto err_out;
+                return -ENOMEM;
 
         if (both) {
                 ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
-                if (ret) {
-                        rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
-                        goto err_out;
-                }
+                if (ret)
+                        goto err_free;
         }
 
         return 0;
 
-err_out:
+err_free:
+        rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+        mr->cur_map_set = NULL;
         return -ENOMEM;
 }
 
@@ -214,7 +214,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
                         pr_warn("%s: Unable to get virtual address\n",
                                 __func__);
                         err = -ENOMEM;
-                        goto err_cleanup_map;
+                        goto err_release_umem;
                 }
 
                 buf->addr = (uintptr_t)vaddr;
@@ -237,8 +237,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
         return 0;
 
-err_cleanup_map:
-        rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
 err_release_umem:
         ib_umem_release(umem);
 err_out:
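The rxe hunks make sure cur_map_set is released exactly once on failure: rxe_mr_alloc() frees it at a single err_free label and sets the pointer to NULL, and rxe_mr_init_user() drops its err_cleanup_map label instead of freeing the same map set again on its own error path. A small userspace sketch of that single-cleanup-label pattern, with made-up names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pair of map sets mirroring cur_map_set/next_map_set. */
struct demo_maps {
        void *cur;
        void *next;
};

static int alloc_maps(struct demo_maps *m, int both)
{
        m->cur = malloc(128);
        if (!m->cur)
                return -ENOMEM;

        if (both) {
                m->next = malloc(128);
                if (!m->next)
                        goto err_free;
        }
        return 0;

err_free:
        free(m->cur);
        m->cur = NULL;          /* later teardown sees NULL, not a stale pointer */
        return -ENOMEM;
}

static void destroy_maps(struct demo_maps *m)
{
        free(m->cur);           /* free(NULL) is a no-op, so this is always safe */
        free(m->next);
        m->cur = NULL;
        m->next = NULL;
}

int main(void)
{
        struct demo_maps m = { 0 };

        printf("alloc_maps returned %d\n", alloc_maps(&m, 1));
        destroy_maps(&m);       /* safe even after the error path */
        return 0;
}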