block: rename REQ_HIPRI to REQ_POLLED
Unlike the RWF_HIPRI userspace ABI which is intentionally kept vague,
the bio flag is specific to the polling implementation, so rename and
document it properly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 6ce913fe3e
parent d729cf9acb
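For context (not part of this commit): the "intentionally vague" userspace side
of this contract is RWF_HIPRI on preadv2()/pwritev2(), which only has an effect
on O_DIRECT files backed by a queue with poll queues. A minimal sketch of a
polled read from userspace; the device path and 4 KiB block size are
assumptions, while preadv2() and RWF_HIPRI are standard Linux/glibc APIs:

/* Hedged sketch: issue one polled O_DIRECT read via RWF_HIPRI.
 * Inside the kernel the flag becomes IOCB_HIPRI and, after this
 * commit, REQ_POLLED on the resulting bio. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	void *buf;
	struct iovec iov;
	int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	iov.iov_base = buf;
	iov.iov_len = 4096;
	/* RWF_HIPRI: poll for completion instead of sleeping on an IRQ */
	return preadv2(fd, &iov, 1, 0, RWF_HIPRI) == 4096 ? 0 : 1;
}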
block/blk-core.c
@@ -842,7 +842,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 	}
 
 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-		bio_clear_hipri(bio);
+		bio_clear_polled(bio);
 
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
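As background for the QUEUE_FLAG_POLL test above: the flag is only set at queue
initialization when the driver registered a dedicated poll queue map, so this
check is a cheap "does this device support polling at all" gate. Roughly, from
blk_mq_init_allocated_queue() of this era (quoted from memory, not part of the
diff):

	/* polling is only advertised if the tag set has a populated
	 * HCTX_TYPE_POLL map */
	if (set->nr_maps > HCTX_TYPE_POLL &&
	    set->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);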
block/blk-merge.c
@@ -318,8 +318,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	 * iopoll in direct IO routine. Given performance gain of iopoll for
 	 * big IO can be trival, disable iopoll when split needed.
 	 */
-	bio_clear_hipri(bio);
-
+	bio_clear_polled(bio);
 	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
block/blk-mq-debugfs.c
@@ -287,7 +287,7 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(BACKGROUND),
 	CMD_FLAG_NAME(NOWAIT),
 	CMD_FLAG_NAME(NOUNMAP),
-	CMD_FLAG_NAME(HIPRI),
+	CMD_FLAG_NAME(POLLED),
 };
 #undef CMD_FLAG_NAME
 
block/blk-mq.c
@@ -732,7 +732,7 @@ bool blk_mq_complete_request_remote(struct request *rq)
 	 * For a polled request, always complete locallly, it's pointless
 	 * to redirect the completion.
 	 */
-	if (rq->cmd_flags & REQ_HIPRI)
+	if (rq->cmd_flags & REQ_POLLED)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
block/blk-mq.c
@@ -2278,7 +2278,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
 	rq_qos_throttle(q, bio);
 
-	hipri = bio->bi_opf & REQ_HIPRI;
+	hipri = bio->bi_opf & REQ_POLLED;
 
 	plug = blk_mq_plug(q, bio);
 	if (plug && plug->cached_rq) {
block/blk-mq.h
@@ -106,9 +106,9 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 	enum hctx_type type = HCTX_TYPE_DEFAULT;
 
 	/*
-	 * The caller ensure that if REQ_HIPRI, poll must be enabled.
+	 * The caller ensure that if REQ_POLLED, poll must be enabled.
 	 */
-	if (flags & REQ_HIPRI)
+	if (flags & REQ_POLLED)
 		type = HCTX_TYPE_POLL;
 	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
 		type = HCTX_TYPE_READ;
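blk_mq_map_queue() can only return a HCTX_TYPE_POLL context if the driver
populated a separate poll map when registering its tag set. A minimal sketch of
such a map_queues callback for a hypothetical driver; nr_irq_queues and
nr_poll_queues are made-up counts, while the map fields and the
blk_mq_map_queues() helper are the real blk-mq API of this era:

static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	/* IRQ-driven queues come first in the hardware queue array ...
	 * (assumes set->nr_maps was sized to include HCTX_TYPE_POLL) */
	set->map[HCTX_TYPE_DEFAULT].nr_queues = nr_irq_queues;
	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);

	/* ... followed by dedicated poll queues that never raise an IRQ */
	set->map[HCTX_TYPE_POLL].nr_queues = nr_poll_queues;
	set->map[HCTX_TYPE_POLL].queue_offset = nr_irq_queues;
	blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	return 0;
}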
block/blk.h
@@ -416,11 +416,11 @@ extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;
 
-static inline void bio_clear_hipri(struct bio *bio)
+static inline void bio_clear_polled(struct bio *bio)
 {
 	/* can't support alloc cache if we turn off polling */
 	bio_clear_flag(bio, BIO_PERCPU_CACHE);
-	bio->bi_opf &= ~REQ_HIPRI;
+	bio->bi_opf &= ~REQ_POLLED;
 }
 
 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
drivers/nvme/host/core.c
@@ -632,7 +632,7 @@ static inline void nvme_init_request(struct request *req,
 
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
-		req->cmd_flags |= REQ_HIPRI;
+		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
drivers/scsi/scsi_debug.c
@@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 {
 	bool new_sd_dp;
 	bool inject = false;
-	bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
+	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
 	int k, num_in_q, qdepth;
 	unsigned long iflags;
 	u64 ns_from_boot = 0;
drivers/scsi/scsi_debug.c
@@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	if (sdebug_host_max_queue)
 		sd_dp->hc_idx = get_tag(cmnd);
 
-	if (hipri)
+	if (polled)
 		ns_from_boot = ktime_get_boottime_ns();
 
 	/* one of the resp_*() response functions is called here */
drivers/scsi/scsi_debug.c
@@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 			kt -= d;
 		}
 	}
-	if (hipri) {
+	if (polled) {
 		sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
 		spin_lock_irqsave(&sqp->qc_lock, iflags);
 		if (!sd_dp->init_poll) {
drivers/scsi/scsi_debug.c
@@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
 			     atomic_read(&sdeb_inject_pending)))
 			sd_dp->aborted = true;
-		if (hipri) {
+		if (polled) {
 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
 			spin_lock_irqsave(&sqp->qc_lock, iflags);
 			if (!sd_dp->init_poll) {
drivers/scsi/scsi_debug.c
@@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
 			if (kt_from_boot < sd_dp->cmpl_ts)
 				continue;
 
-		} else		/* ignoring non REQ_HIPRI requests */
+		} else		/* ignoring non REQ_POLLED requests */
 			continue;
 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
 		if (likely(devip))
include/linux/bio.h
@@ -706,7 +706,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
  */
 static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 {
-	bio->bi_opf |= REQ_HIPRI;
+	bio->bi_opf |= REQ_POLLED;
 	if (!is_sync_kiocb(kiocb))
 		bio->bi_opf |= REQ_NOWAIT;
 }
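The intended callers of bio_set_polled() are direct I/O paths acting on a
kiocb. Paraphrased from the block-device direct I/O submission code of this
era (not part of this diff), the pattern is simply:

	/* propagate the iocb's polling request, set by
	 * preadv2(..., RWF_HIPRI), onto the bio being submitted */
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, iocb);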
include/linux/blk_types.h
@@ -384,7 +384,7 @@ enum req_flag_bits {
 	/* command specific flags for REQ_OP_WRITE_ZEROES: */
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
-	__REQ_HIPRI,
+	__REQ_POLLED,		/* caller polls for completion using blk_poll */
 
 	/* for driver use */
 	__REQ_DRV,
include/linux/blk_types.h
@@ -409,7 +409,7 @@ enum req_flag_bits {
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI		(1ULL << __REQ_HIPRI)
+#define REQ_POLLED		(1ULL << __REQ_POLLED)
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
 #define REQ_SWAP		(1ULL << __REQ_SWAP)
mm/page_io.c
@@ -416,7 +416,7 @@ int swap_readpage(struct page *page, bool synchronous)
 	 * attempt to access it in the page fault retry time check.
 	 */
 	if (synchronous) {
-		bio->bi_opf |= REQ_HIPRI;
+		bio->bi_opf |= REQ_POLLED;
 		get_task_struct(current);
 		bio->bi_private = current;
 	}
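For context on the swap hunk above: the synchronous path pairs the flag with a
submit-and-poll loop a little further down in swap_readpage(). Paraphrased from
memory of the surrounding code, using the blk_qc_t cookie interface still in
place at this commit:

	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* bi_private is cleared by end_swap_bio_read() on completion */
		if (!READ_ONCE(bio->bi_private))
			break;
		/* poll the queue; back off and sleep if nothing completed */
		if (!blk_poll(disk->queue, qc, false))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);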