svcrdma: Add an async version of svc_rdma_write_info_free()

DMA unmapping can take quite some time, so it should not be handled
in a single-threaded completion handler. Defer releasing write_info
structs to the recently-added workqueue.

With this patch, DMA unmapping can be handled in parallel, and it
does not cause head-of-queue blocking of Write completions.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
commit f09c36c8df
parent ae225fe27b
Author: Chuck Lever <chuck.lever@oracle.com>
Date:   2023-11-21 11:40:39 -05:00

net/sunrpc/xprtrdma/svc_rdma_rw.c
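The change applies a common kernel idiom: embed a work_struct in the object and hand its release to a workqueue so the completion handler returns quickly. The following is a minimal sketch of that idiom only, not the svcrdma code; the deferred_buf struct, the placeholder teardown comment, and the use of the generic system_wq are illustrative assumptions (the patch itself queues onto svcrdma_wq, as shown in the diff below).

/*
 * Sketch: deferring an expensive release from a completion path
 * to a workqueue.  Illustrative names only.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_buf {
	void *pages;			/* resource that is costly to release */
	struct work_struct free_work;	/* embedded work item */
};

static void deferred_buf_free_async(struct work_struct *work)
{
	struct deferred_buf *buf;

	/* Recover the containing object from the embedded work item. */
	buf = container_of(work, struct deferred_buf, free_work);

	/* ... expensive teardown (e.g. DMA unmapping) would go here ... */
	kfree(buf);
}

static void deferred_buf_free(struct deferred_buf *buf)
{
	/* Hand the release off so the single-threaded caller is not blocked. */
	INIT_WORK(&buf->free_work, deferred_buf_free_async);
	queue_work(system_wq, &buf->free_work);
}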

@@ -227,6 +227,7 @@ struct svc_rdma_write_info {
 	unsigned int		wi_next_off;
 
 	struct svc_rdma_chunk_ctxt	wi_cc;
+	struct work_struct	wi_work;
 };
 
 static struct svc_rdma_write_info *
@@ -248,12 +249,21 @@ svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
 	return info;
 }
 
-static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
+static void svc_rdma_write_info_free_async(struct work_struct *work)
 {
+	struct svc_rdma_write_info *info;
+
+	info = container_of(work, struct svc_rdma_write_info, wi_work);
 	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
 	kfree(info);
 }
 
+static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
+{
+	INIT_WORK(&info->wi_work, svc_rdma_write_info_free_async);
+	queue_work(svcrdma_wq, &info->wi_work);
+}
+
 /**
  * svc_rdma_write_done - Write chunk completion
  * @cq: controlling Completion Queue