block: remove the per-bio/request write hint
With the NVMe support for this gone, there are no consumers of these hints left, so remove them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220304175556.407719-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit c75e707fe1
parent 85e6c77576
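For context: this commit removes only the in-kernel, per-bio/request propagation of write-life hints. The user-facing interface is untouched; applications set a per-file hint with fcntl(F_SET_RW_HINT), which the VFS keeps in inode->i_write_hint. A minimal userspace sketch follows (the fallback #define values are copied from include/uapi/linux/fcntl.h; the file name is made up for illustration):

/* set_hint.c - hedged sketch of the per-file write-life hint fcntl */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		1036	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("scratch.dat", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The hint is stored in inode->i_write_hint; after this commit
	 * it is no longer copied into each bio or request. */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");
	close(fd);
	return 0;
}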
block/bio.c
@@ -257,7 +257,6 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 	bio->bi_opf = opf;
 	bio->bi_flags = 0;
 	bio->bi_ioprio = 0;
-	bio->bi_write_hint = 0;
 	bio->bi_status = 0;
 	bio->bi_iter.bi_sector = 0;
 	bio->bi_iter.bi_size = 0;
@@ -737,7 +736,6 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 	    bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
 
 	bio_clone_blkg_association(bio, bio_src);
block/blk-crypto-fallback.c
@@ -170,7 +170,6 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
block/blk-merge.c
@@ -782,13 +782,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return NULL;
 
-	/*
-	 * Don't allow merge of different write hints, or for a hint with
-	 * non-hint IO.
-	 */
-	if (req->write_hint != next->write_hint)
-		return NULL;
-
 	if (req->ioprio != next->ioprio)
 		return NULL;
 
@@ -915,13 +908,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	/*
-	 * Don't allow merge of different write hints, or for a hint with
-	 * non-hint IO.
-	 */
-	if (rq->write_hint != bio->bi_write_hint)
-		return false;
-
 	if (rq->ioprio != bio_prio(bio))
 		return false;
 
block/blk-mq-debugfs.c
@@ -183,35 +183,11 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
 	return count;
 }
 
-static int queue_write_hint_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	int i;
-
-	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
-		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
-
-	return 0;
-}
-
-static ssize_t queue_write_hint_store(void *data, const char __user *buf,
-				      size_t count, loff_t *ppos)
-{
-	struct request_queue *q = data;
-	int i;
-
-	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
-		q->write_hints[i] = 0;
-
-	return count;
-}
-
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{ "poll_stat", 0400, queue_poll_stat_show },
 	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
 	{ "pm_only", 0600, queue_pm_only_show, NULL },
 	{ "state", 0600, queue_state_show, queue_state_write },
-	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
 	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
 	{ },
 };
block/blk-mq.c
@@ -2406,7 +2406,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		rq->cmd_flags |= REQ_FAILFAST_MASK;
 
 	rq->__sector = bio->bi_iter.bi_sector;
-	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
 
 	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
block/bounce.c
@@ -169,7 +169,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
block/fops.c
@@ -83,7 +83,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
 	}
 	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-	bio.bi_write_hint = iocb->ki_hint;
 	bio.bi_private = current;
 	bio.bi_end_io = blkdev_bio_end_io_simple;
 	bio.bi_ioprio = iocb->ki_ioprio;
@@ -225,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 
 	for (;;) {
 		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-		bio->bi_write_hint = iocb->ki_hint;
 		bio->bi_private = dio;
 		bio->bi_end_io = blkdev_bio_end_io;
 		bio->bi_ioprio = iocb->ki_ioprio;
@@ -327,7 +325,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	dio->flags = 0;
 	dio->iocb = iocb;
 	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_end_io = blkdev_bio_end_io_async;
 	bio->bi_ioprio = iocb->ki_ioprio;
 
drivers/md/raid1.c
@@ -1137,8 +1137,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
 		goto skip_copy;
 	}
 
-	behind_bio->bi_write_hint = bio->bi_write_hint;
-
 	while (i < vcnt && size) {
 		struct page *page;
 		int len = min_t(int, PAGE_SIZE, size);
drivers/md/raid5-ppl.c
@@ -467,7 +467,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 	bio_set_dev(bio, log->rdev->bdev);
 	bio->bi_iter.bi_sector = log->next_io_sector;
 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
-	bio->bi_write_hint = ppl_conf->write_hint;
 
 	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
 		 (unsigned long long)log->next_io_sector);
@@ -497,7 +496,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 			bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
 					       prev->bi_opf, GFP_NOIO,
 					       &ppl_conf->bs);
-			bio->bi_write_hint = prev->bi_write_hint;
 			bio->bi_iter.bi_sector = bio_end_sector(prev);
 			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
 
@@ -1397,7 +1395,6 @@ int ppl_init_log(struct r5conf *conf)
 	atomic64_set(&ppl_conf->seq, 0);
 	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
 	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
-	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
 
 	if (!mddev->external) {
 		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
@@ -1496,25 +1493,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
 static ssize_t
 ppl_write_hint_show(struct mddev *mddev, char *buf)
 {
-	size_t ret = 0;
-	struct r5conf *conf;
-	struct ppl_conf *ppl_conf = NULL;
-
-	spin_lock(&mddev->lock);
-	conf = mddev->private;
-	if (conf && raid5_has_ppl(conf))
-		ppl_conf = conf->log_private;
-	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
-	spin_unlock(&mddev->lock);
-
-	return ret;
+	return sprintf(buf, "%d\n", 0);
 }
 
 static ssize_t
 ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
 {
 	struct r5conf *conf;
-	struct ppl_conf *ppl_conf;
 	int err = 0;
 	unsigned short new;
 
@@ -1528,17 +1513,10 @@ ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
 		return err;
 
 	conf = mddev->private;
-	if (!conf) {
+	if (!conf)
 		err = -ENODEV;
-	} else if (raid5_has_ppl(conf)) {
-		ppl_conf = conf->log_private;
-		if (!ppl_conf)
-			err = -EINVAL;
-		else
-			ppl_conf->write_hint = new;
-	} else {
+	else if (!raid5_has_ppl(conf) || !conf->log_private)
 		err = -EINVAL;
-	}
 
 	mddev_unlock(mddev);
 
drivers/md/raid5.c
@@ -1210,9 +1210,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
 			bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
 			bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
-			bi->bi_write_hint = sh->dev[i].write_hint;
-			if (!rrdev)
-				sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -1264,8 +1261,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
 			rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
 			rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
-			rbi->bi_write_hint = sh->dev[i].write_hint;
-			sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -3416,7 +3411,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
 		 (unsigned long long)sh->sector);
 
 	spin_lock_irq(&sh->stripe_lock);
-	sh->dev[dd_idx].write_hint = bi->bi_write_hint;
 	/* Don't allow new IO added to stripes in batch list */
 	if (sh->batch_head)
 		goto overlap;
fs/btrfs/extent_io.c
@@ -3321,7 +3321,6 @@ static int alloc_new_bio(struct btrfs_inode *inode,
 	bio_ctrl->bio_flags = bio_flags;
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = &inode->io_tree;
-	bio->bi_write_hint = inode->vfs_inode.i_write_hint;
 	bio->bi_opf = opf;
 	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
 	if (ret < 0)
fs/buffer.c
@@ -53,7 +53,7 @@
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 enum rw_hint hint, struct writeback_control *wbc);
+			 struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -1806,8 +1806,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
-				      inode->i_write_hint, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1861,8 +1860,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
-				      inode->i_write_hint, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -3008,7 +3006,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 enum rw_hint write_hint, struct writeback_control *wbc)
+			 struct writeback_control *wbc)
 {
 	struct bio *bio;
 
@@ -3034,7 +3032,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio->bi_write_hint = write_hint;
 
 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
@@ -3056,7 +3053,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 int submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
-	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
+	return submit_bh_wbc(op, op_flags, bh, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
fs/direct-io.c
@@ -402,9 +402,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 		bio->bi_end_io = dio_bio_end_aio;
 	else
 		bio->bi_end_io = dio_bio_end_io;
-
-	bio->bi_write_hint = dio->iocb->ki_hint;
-
 	sdio->bio = bio;
 	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
fs/ext4/page-io.c
@@ -373,7 +373,6 @@ void ext4_io_submit(struct ext4_io_submit *io)
 	if (bio) {
 		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
 			io->io_bio->bi_opf |= REQ_SYNC;
-		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
 		submit_bio(io->io_bio);
 	}
 	io->io_bio = NULL;
@@ -418,10 +417,8 @@ static void io_submit_add_bh(struct ext4_io_submit *io,
 	submit_and_retry:
 		ext4_io_submit(io);
 	}
-	if (io->io_bio == NULL) {
+	if (io->io_bio == NULL)
 		io_submit_init_bio(io, bh);
-		io->io_bio->bi_write_hint = inode->i_write_hint;
-	}
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
fs/f2fs/data.c
@@ -403,8 +403,6 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 	} else {
 		bio->bi_end_io = f2fs_write_end_io;
 		bio->bi_private = sbi;
-		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
-						fio->type, fio->temp);
 	}
 	iostat_alloc_and_bind_ctx(sbi, bio, NULL);
 
fs/gfs2/lops.c
@@ -491,7 +491,6 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
 	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
 	bio_clone_blkg_association(new, prev);
 	new->bi_iter.bi_sector = bio_end_sector(prev);
-	new->bi_write_hint = prev->bi_write_hint;
 	bio_chain(new, prev);
 	submit_bio(prev);
 	return new;
fs/iomap/buffered-io.c
@@ -1232,7 +1232,6 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
 			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
 			       GFP_NOFS, &iomap_ioend_bioset);
 	bio->bi_iter.bi_sector = sector;
-	bio->bi_write_hint = inode->i_write_hint;
 	wbc_init_bio(wbc, bio);
 
 	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
@@ -1263,7 +1262,6 @@ iomap_chain_bio(struct bio *prev)
 	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
 	bio_clone_blkg_association(new, prev);
 	new->bi_iter.bi_sector = bio_end_sector(prev);
-	new->bi_write_hint = prev->bi_write_hint;
 
 	bio_chain(prev, new);
 	bio_get(prev); /* for iomap_finish_ioend */
fs/iomap/direct-io.c
@@ -309,7 +309,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 
 	bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL);
 	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
-	bio->bi_write_hint = dio->iocb->ki_hint;
 	bio->bi_ioprio = dio->iocb->ki_ioprio;
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
fs/mpage.c
@@ -588,7 +588,6 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 				GFP_NOFS);
 		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 		wbc_init_bio(wbc, bio);
-		bio->bi_write_hint = inode->i_write_hint;
 	}
 
 	/*
fs/zonefs/super.c
@@ -695,7 +695,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	bio = bio_alloc(bdev, nr_pages,
 			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
 	bio->bi_iter.bi_sector = zi->i_zsector;
-	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_ioprio = iocb->ki_ioprio;
 	if (iocb->ki_flags & IOCB_DSYNC)
 		bio->bi_opf |= REQ_FUA;
include/linux/blk_types.h
@@ -250,7 +250,6 @@ struct bio {
 	 */
 	unsigned short		bi_flags;	/* BIO_* below */
 	unsigned short		bi_ioprio;
-	unsigned short		bi_write_hint;
 	blk_status_t		bi_status;
 	atomic_t		__bi_remaining;
 
include/linux/blkdev.h
@@ -518,9 +518,6 @@ struct request_queue {
 
 	bool			mq_sysfs_init_done;
 
-#define BLK_MAX_WRITE_HINTS	5
-	u64			write_hints[BLK_MAX_WRITE_HINTS];
-
 	/*
 	 * Independent sector access ranges. This is always NULL for
 	 * devices that do not have multiple independent access ranges.
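For reference (not part of this diff), the write-life values that the removed bi_write_hint/write_hint fields carried are the RWH_* constants from include/uapi/linux/fcntl.h, which this commit leaves untouched:

/* include/uapi/linux/fcntl.h - valid hint values for F_SET_RW_HINT */
#define RWH_WRITE_LIFE_NOT_SET	0
#define RWH_WRITE_LIFE_NONE	1
#define RWH_WRITE_LIFE_SHORT	2
#define RWH_WRITE_LIFE_MEDIUM	3
#define RWH_WRITE_LIFE_LONG	4
#define RWH_WRITE_LIFE_EXTREME	5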