virtio_ring: put mapping error check in vring_map_one_sg

This patch puts the DMA address error check inside vring_map_one_sg().

The benefits of doing this:

1. Saves one vq->use_dma_api check per mapping: vring_mapping_error()
   no longer needs to re-test the flag after every call.
2. Makes the call sites of vring_map_one_sg() simpler: they no longer
   run the returned value through vring_mapping_error(), which
   simplifies the subsequent code (see the sketch below).
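
To make benefit 2 concrete, here is the change in calling convention,
condensed from the hunks below (context trimmed):

	/* Before: vring_map_one_sg() returned the dma_addr_t directly and
	 * every call site had to run it through vring_mapping_error(). */
	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);

	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	/* After: the error check lives inside vring_map_one_sg(), which
	 * returns 0 or -ENOMEM and hands the address back through *addr. */
	dma_addr_t addr;

	if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
		goto unmap_release;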

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20230810123057.43407-3-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

drivers/virtio/virtio_ring.c

@@ -355,9 +355,8 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 }
 
 /* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
-				   struct scatterlist *sg,
-				   enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+			    enum dma_data_direction direction, dma_addr_t *addr)
 {
 	if (!vq->use_dma_api) {
 		/*
@@ -366,7 +365,8 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
 		 * depending on the direction.
 		 */
 		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
-		return (dma_addr_t)sg_phys(sg);
+		*addr = (dma_addr_t)sg_phys(sg);
+		return 0;
 	}
 
 	/*
@@ -374,9 +374,14 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
 	 * the way it expects (we don't guarantee that the scatterlist
 	 * will exist for the lifetime of the mapping).
 	 */
-	return dma_map_page(vring_dma_dev(vq),
-			    sg_page(sg), sg->offset, sg->length,
-			    direction);
+	*addr = dma_map_page(vring_dma_dev(vq),
+			     sg_page(sg), sg->offset, sg->length,
+			     direction);
+
+	if (dma_mapping_error(vring_dma_dev(vq), *addr))
+		return -ENOMEM;
+
+	return 0;
 }
 
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
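
For context on benefit 1: the vring_mapping_error() helper that the call
sites below stop using tests vq->use_dma_api again before calling
dma_mapping_error(); its shape is approximately the following sketch
(recalled from the same file, not part of this diff):

	static int vring_mapping_error(const struct vring_virtqueue *vq,
				       dma_addr_t addr)
	{
		if (!vq->use_dma_api)
			return 0;

		return dma_mapping_error(vring_dma_dev(vq), addr);
	}

Since vring_map_one_sg() already branches on vq->use_dma_api, doing the
dma_mapping_error() check inside it drops that second test of the flag.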
@@ -588,8 +593,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
 	for (n = 0; n < out_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
-			if (vring_mapping_error(vq, addr))
+			dma_addr_t addr;
+
+			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
 				goto unmap_release;
 
 			prev = i;
@@ -603,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
-			if (vring_mapping_error(vq, addr))
+			dma_addr_t addr;
+
+			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
 				goto unmap_release;
 
 			prev = i;
@@ -1281,9 +1288,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 
 	for (n = 0; n < out_sgs + in_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-						DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			if (vring_mapping_error(vq, addr))
+			if (vring_map_one_sg(vq, sg, n < out_sgs ?
+					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
 				goto unmap_release;
 
 			desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1428,9 +1434,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	c = 0;
 	for (n = 0; n < out_sgs + in_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			if (vring_mapping_error(vq, addr))
+			dma_addr_t addr;
+
+			if (vring_map_one_sg(vq, sg, n < out_sgs ?
+					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
 				goto unmap_release;
 
 			flags = cpu_to_le16(vq->packed.avail_used_flags |