ublk: kill queuing request by task_work_add
task_work_add() has been used since the early ublk development stage for handling requests in batch. However, since commit 7d4a93176e01 ("ublk_drv: don't forward io commands in reserve order"), io_uring_cmd_complete_in_task() gives us similar batch processing, and similar performance data is observed between task_work_add() and io_uring_cmd_complete_in_task(). Meanwhile we can kill one fast code path, which is actually seldom used given it is common to build the ublk driver as a module.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230519065030.351216-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9a67aa52a4
commit 29dc5d0661
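For illustration, the queuing path that survives this change defers request handling to the ubq daemon's task context through io_uring_cmd_complete_in_task() instead of task_work_add(). Below is a minimal sketch of that pattern; it reuses the names from ublk_drv.c shown in the diff (ublk_get_uring_cmd_pdu(), ublk_forward_io_cmds(), ubq->io_cmds, struct ublk_io) and omits the UBLK_IO_FLAG_ABORTED handling, so read it as an outline of the mechanism rather than the driver code itself.

/*
 * Sketch only: each request is chained on a per-queue llist, and the
 * io_uring core runs the callback in the daemon task's context, which
 * gives batching similar to what task_work_add() provided.
 */
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

	/* drain the queued requests and forward them to the daemon */
	ublk_forward_io_cmds(pdu->ubq, issue_flags);
}

static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
	struct ublk_io *io = &ubq->ios[rq->tag];

	/* llist_add() returns true if the list was empty before */
	if (llist_add(&data->node, &ubq->io_cmds)) {
		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);

		pdu->ubq = ubq;
		/* first request: schedule the callback in the daemon's task */
		io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
	}
}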
drivers/block/ublk_drv.c
@@ -62,7 +62,6 @@
 
 struct ublk_rq_data {
 	struct llist_node node;
-	struct callback_head work;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -290,14 +289,6 @@ static int ublk_apply_params(struct ublk_device *ub)
 	return 0;
 }
 
-static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
-{
-	if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
-			!(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
-		return true;
-	return false;
-}
-
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_NEED_GET_DATA;
@@ -852,17 +843,6 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 	ublk_forward_io_cmds(ubq, issue_flags);
 }
 
-static void ublk_rq_task_work_fn(struct callback_head *work)
-{
-	struct ublk_rq_data *data = container_of(work,
-			struct ublk_rq_data, work);
-	struct request *req = blk_mq_rq_from_pdu(data);
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
-	unsigned issue_flags = IO_URING_F_UNLOCKED;
-
-	ublk_forward_io_cmds(ubq, issue_flags);
-}
-
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
 	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
@@ -886,10 +866,6 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 	 */
 	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
 		ublk_abort_io_cmds(ubq);
-	} else if (ublk_can_use_task_work(ubq)) {
-		if (task_work_add(ubq->ubq_daemon, &data->work,
-					TWA_SIGNAL_NO_IPI))
-			ublk_abort_io_cmds(ubq);
 	} else {
 		struct io_uring_cmd *cmd = io->cmd;
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -961,19 +937,9 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
 	return 0;
 }
 
-static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
-		unsigned int hctx_idx, unsigned int numa_node)
-{
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-	init_task_work(&data->work, ublk_rq_task_work_fn);
-	return 0;
-}
-
 static const struct blk_mq_ops ublk_mq_ops = {
 	.queue_rq	= ublk_queue_rq,
 	.init_hctx	= ublk_init_hctx,
-	.init_request	= ublk_init_rq,
 	.timeout	= ublk_timeout,
 };
 
@@ -1813,10 +1779,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 	 */
 	ub->dev_info.flags &= UBLK_F_ALL;
 
-	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
-		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
-
-	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
+		UBLK_F_URING_CMD_COMP_IN_TASK;
 
 	/* We are not ready to support zero copy */
 	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;