for-5.20/block-2022-07-29

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmLko3gQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpmQaD/90NKFj4v8I456TUQyg1jimXEsL+e84E6o2
 ALWVb6JzQvlPVQXNLnK5YKIunMWOTtTMz0nyB8sVRwVJVJO0P5d7QopAkZM8fkyU
 MK5OCzoryENw4DTc2wJS4in6cSbGylIuN74wMzlf7+M67JTImfoZQhbTMcjwzZfn
 b3OlL6sID7zMXwGcuOJPZyUJICCpDhzdSF9JXqKma5PQuG2SBmQyvFxJAcsoFBPc
 YetnoRIOIN6yBvsIZaPaYq7XI9MIvF0e67EQtyCEHj4tHpyVnyDWkeObVFULsISU
 gGEKbkYPvNUzRAU5Q1NBBHh1tTfkf/MaUxTuZwoEwZ/s04IGBGMmrZGyfvdfzYo6
 M7NwSEg/TrUSNfTwn65mQi7uOXu1pGkJrqz84Flm8u9Qid9Vd7LExLG5p/ggnWdH
 5th93MDEmtEg29e9DXpEAuS5d0t3TtSvosflaKpyfNNfr+P0rWCN6GM/uW62VUTK
 ls69SQh/AQJRbg64jU4xper6WhaYtSXK7TKEnxJycoEn9gYNyCcdot2uekth0xRH
 ChHGmRlteiqe/y4uFWn/2dcxWjoleiHbFjTaiRL75WVl8wIDEjw02LGuoZ61Ss9H
 WOV+MT7KqNjBGe6lreUY+O/PO02dzmoR6heJXN19p8zr/pBuLCTGX7UpO7rzgaBR
 4N1HEozvIw==
 =celk
 -----END PGP SIGNATURE-----

Merge tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:

 - Improve the type checking of request flags (Bart); see the sketch
   after this list

 - Ensure queue mapping for a single queue always picks the right queue
   (Bart)

 - Sanitize the I/O priority handling (Jan)

 - rq-qos race fix (Jinke)

 - Reserved tags handling improvements (John)

 - Separate memory alignment from file/disk offset alignment for O_DIRECT
   (Keith)

 - Add new ublk driver, a userspace block driver using io_uring for
   communication with the userspace backend (Ming)

 - Use try_cmpxchg() to clean up the code in various spots (Uros)

 - Finally remove bdevname() (Christoph)

 - Clean up the zoned device handling (Christoph)

 - Clean up independent access range support (Christoph)

 - Clean up and improve block sysfs handling (Christoph)

 - Clean up and improve teardown of block devices.

   This turns the usual two-step process into something that is simpler
   to implement and handle in block drivers (Christoph)

 - Clean up chunk size handling (Christoph)

 - Misc cleanups and fixes (Bart, Bo, Dan, GuoYong, Jason, Keith, Liu,
   Ming, Sebastian, Yang, Ying)
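
A note on the first item above: throughout the diffs below, bare
"unsigned int op" parameters become blk_opf_t (operation plus flags) or
enum req_op (operation only), so sparse can flag accidental mixing. A
minimal sketch of the idea, assuming the usual __bitwise machinery; the
authoritative definitions live in include/linux/blk_types.h:

#include <linux/types.h>

/*
 * Sketch only: __bitwise makes sparse warn when this type and plain
 * integers are mixed without an explicit __force cast.
 */
typedef unsigned int __bitwise blk_opf_t;	/* REQ_OP_* | REQ_* flags */

enum req_op {
	REQ_OP_READ	= (__force blk_opf_t)0,
	REQ_OP_WRITE	= (__force blk_opf_t)1,
	/* ... */
};

static inline bool op_is_write(blk_opf_t opf)
{
	return ((__force unsigned int)opf & 1);	/* writes have odd opcodes */
}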

* tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block: (178 commits)
  ublk_drv: fix double shift bug
  ublk_drv: make sure that correct flags(features) returned to userspace
  ublk_drv: fix error handling of ublk_add_dev
  ublk_drv: fix lockdep warning
  block: remove __blk_get_queue
  block: call blk_mq_exit_queue from disk_release for never added disks
  blk-mq: fix error handling in __blk_mq_alloc_disk
  ublk: defer disk allocation
  ublk: rewrite ublk_ctrl_get_queue_affinity to not rely on hctx->cpumask
  ublk: fold __ublk_create_dev into ublk_ctrl_add_dev
  ublk: cleanup ublk_ctrl_uring_cmd
  ublk: simplify ublk_ch_open and ublk_ch_release
  ublk: remove the empty open and release block device operations
  ublk: remove UBLK_IO_F_PREFLUSH
  ublk: add a MAINTAINERS entry
  block: don't allow the same type rq_qos add more than once
  mmc: fix disk/queue leak in case of adding disk failure
  ublk_drv: fix an IS_ERR() vs NULL check
  ublk: remove UBLK_IO_F_INTEGRITY
  ublk_drv: remove unneeded semicolon
  ...
Linus Torvalds 2022-08-02 13:46:35 -07:00
commit c013d0af81
261 changed files with 3640 additions and 2163 deletions

Documentation/ABI/stable/sysfs-block

@ -260,6 +260,15 @@ Description:
for discards, and don't read this file.
What: /sys/block/<disk>/queue/dma_alignment
Date: May 2022
Contact: linux-block@vger.kernel.org
Description:
Reports the alignment that user space addresses must have to be
used for raw block device access with O_DIRECT and other driver
specific passthrough mechanisms.
What: /sys/block/<disk>/queue/fua
Date: May 2018
Contact: linux-block@vger.kernel.org
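
The dma_alignment attribute documented above is the user-visible side of
the "separate memory alignment from file/disk offset alignment" item in
the summary. A hedged userspace sketch of how a program might consume it;
the path handling and fallbacks are illustrative, not from the patch:

#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace sketch (not part of the patch): the attribute reports an
 * alignment mask, so an O_DIRECT buffer must be (mask + 1)-byte aligned.
 */
static void *alloc_dio_buf(const char *disk, size_t len)
{
	char path[128];
	unsigned int mask = 511;	/* fall back to 512-byte alignment */
	void *buf = NULL;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/block/%s/queue/dma_alignment", disk);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &mask) != 1)
			mask = 511;
		fclose(f);
	}
	if (mask + 1 < sizeof(void *))
		mask = sizeof(void *) - 1;	/* posix_memalign minimum */
	if (posix_memalign(&buf, mask + 1, len))
		return NULL;
	return buf;
}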

Documentation/scsi/scsi_eh.rst

@ -87,8 +87,7 @@ with the command.
1.2.2 Completing a scmd w/ timeout
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The timeout handler is scsi_times_out(). When a timeout occurs, this
function
The timeout handler is scsi_timeout(). When a timeout occurs, this function
1. invokes optional hostt->eh_timed_out() callback. Return value can
be one of

Documentation/scsi/scsi_mid_low_api.rst

@ -731,7 +731,7 @@ Details::
* Notes: If 'no_async_abort' is defined this callback
* will be invoked from scsi_eh thread. No other commands
* will then be queued on current host during eh.
* Otherwise it will be called whenever scsi_times_out()
* Otherwise it will be called whenever scsi_timeout()
* is called due to a command timeout.
*
* Optionally defined in: LLD

MAINTAINERS

@ -20539,6 +20539,13 @@ F: Documentation/filesystems/ubifs-authentication.rst
F: Documentation/filesystems/ubifs.rst
F: fs/ubifs/
UBLK USERSPACE BLOCK DRIVER
M: Ming Lei <ming.lei@redhat.com>
L: linux-block@vger.kernel.org
S: Maintained
F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
UCLINUX (M68KNOMMU AND COLDFIRE)
M: Greg Ungerer <gerg@linux-m68k.org>
L: linux-m68k@lists.linux-m68k.org

arch/m68k/emu/nfblock.c

@ -138,7 +138,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
return 0;
out_cleanup_disk:
blk_cleanup_disk(dev->disk);
put_disk(dev->disk);
free_dev:
kfree(dev);
out:
@ -180,7 +180,7 @@ static void __exit nfhd_exit(void)
list_for_each_entry_safe(dev, next, &nfhd_list, list) {
list_del(&dev->list);
del_gendisk(dev->disk);
blk_cleanup_disk(dev->disk);
put_disk(dev->disk);
kfree(dev);
}
unregister_blkdev(major_num, "nfhd");
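
The nfblock hunks above are one instance of the simplified block device
teardown from the summary: blk_cleanup_disk() calls become put_disk(),
whose final reference drop now also releases the queue. A driver-shaped
sketch of the resulting pattern, with illustrative names (mydev and
mydrv_remove are not from the patch):

#include <linux/blkdev.h>
#include <linux/slab.h>

struct mydev {				/* illustrative driver state */
	struct gendisk *disk;
};

static void mydrv_remove(struct mydev *dev)
{
	del_gendisk(dev->disk);		/* stop new I/O, remove from sysfs */
	put_disk(dev->disk);		/* final put frees disk and queue */
	kfree(dev);
}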

arch/um/drivers/ubd_kern.c

@ -925,7 +925,7 @@ static int ubd_add(int n, char **error_out)
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&ubd_dev->tag_set);
out:
@ -1032,7 +1032,7 @@ static int ubd_remove(int n, char **error_out)
ubd_gendisk[n] = NULL;
if(disk != NULL){
del_gendisk(disk);
blk_cleanup_disk(disk);
put_disk(disk);
}
err = 0;
@ -1262,7 +1262,7 @@ static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
struct req_iterator iter;
int i = 0;
unsigned long byte_offset = io_req->offset;
int op = req_op(req);
enum req_op op = req_op(req);
if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
io_req->io_desc[0].buffer = NULL;
@ -1325,7 +1325,7 @@ static int ubd_submit_request(struct ubd *dev, struct request *req)
int segs = 0;
struct io_thread_req *io_req;
int ret;
int op = req_op(req);
enum req_op op = req_op(req);
if (op == REQ_OP_FLUSH)
segs = 0;

arch/xtensa/platforms/iss/simdisk.c

@ -290,7 +290,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
return 0;
out_cleanup_disk:
blk_cleanup_disk(dev->gd);
put_disk(dev->gd);
out:
return err;
}
@ -344,7 +344,7 @@ static void simdisk_teardown(struct simdisk *dev, int which,
simdisk_detach(dev);
if (dev->gd) {
del_gendisk(dev->gd);
blk_cleanup_disk(dev->gd);
put_disk(dev->gd);
}
remove_proc_entry(tmp, procdir);
}

block/Kconfig

@ -147,7 +147,6 @@ config BLK_CGROUP_FC_APPID
config BLK_CGROUP_IOCOST
bool "Enable support for cost model based cgroup IO controller"
depends on BLK_CGROUP
select BLK_RQ_IO_DATA_LEN
select BLK_RQ_ALLOC_TIME
help
Enabling this option enables the .weight interface for cost

block/bdev.c

@ -54,12 +54,10 @@ static void bdev_write_inode(struct block_device *bdev)
while (inode->i_state & I_DIRTY) {
spin_unlock(&inode->i_lock);
ret = write_inode_now(inode, true);
if (ret) {
char name[BDEVNAME_SIZE];
pr_warn_ratelimited("VFS: Dirty inode writeback failed "
"for block device %s (err=%d).\n",
bdevname(bdev, name), ret);
}
if (ret)
pr_warn_ratelimited(
"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
bdev, ret);
spin_lock(&inode->i_lock);
}
spin_unlock(&inode->i_lock);
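
The hunk above is one of the bdevname() removals: the %pg printk
specifier formats a block_device name in place, so callers no longer
need a BDEVNAME_SIZE stack buffer. A hedged mini-example (the function
name is illustrative):

#include <linux/blkdev.h>
#include <linux/printk.h>

static void report_bdev_error(struct block_device *bdev, int err)
{
	/* before: char name[BDEVNAME_SIZE]; bdevname(bdev, name); "%s" */
	pr_warn("I/O error on %pg (err=%d)\n", bdev, err);
}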

block/bfq-cgroup.c

@ -220,46 +220,46 @@ void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
}
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op)
blk_opf_t opf)
{
blkg_rwstat_add(&bfqg->stats.queued, op, 1);
blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
bfqg_stats_end_empty_time(&bfqg->stats);
if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
blkg_rwstat_add(&bfqg->stats.queued, op, -1);
blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
blkg_rwstat_add(&bfqg->stats.merged, op, 1);
blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, unsigned int op)
u64 io_start_time_ns, blk_opf_t opf)
{
struct bfqg_stats *stats = &bfqg->stats;
u64 now = ktime_get_ns();
if (now > io_start_time_ns)
blkg_rwstat_add(&stats->service_time, op,
blkg_rwstat_add(&stats->service_time, opf,
now - io_start_time_ns);
if (io_start_time_ns > start_time_ns)
blkg_rwstat_add(&stats->wait_time, op,
blkg_rwstat_add(&stats->wait_time, opf,
io_start_time_ns - start_time_ns);
}
#else /* CONFIG_BFQ_CGROUP_DEBUG */
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
blk_opf_t opf) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, unsigned int op) { }
u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
@ -706,10 +706,10 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
/**
* __bfq_bic_change_cgroup - move @bic to @cgroup.
* __bfq_bic_change_cgroup - move @bic to @bfqg.
* @bfqd: the queue descriptor.
* @bic: the bic to move.
* @blkcg: the blk-cgroup to move to.
* @bfqg: the group to move to.
*
* Move bic to blkcg, assuming that bfqd->lock is held; which makes
* sure that the reference to cgroup is valid across the call (see
@ -863,6 +863,7 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st)
* @bfqd: the device data structure with the root group.
* @entity: the entity to move, if entity is a leaf; or the parent entity
* of an active leaf entity to move, if entity is not a leaf.
* @ioprio_class: I/O priority class to reparent.
*/
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
struct bfq_entity *entity,
@ -892,6 +893,7 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
* @bfqd: the device data structure with the root group.
* @bfqg: the group to move from.
* @st: the service tree to start the search from.
* @ioprio_class: I/O priority class to reparent.
*/
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
struct bfq_group *bfqg,
@ -1471,8 +1473,6 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
return bfqq->bfqd->root_group;
}
void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)

block/bfq-iosched.c

@ -668,19 +668,19 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
* significantly affect service guarantees coming from the BFQ scheduling
* algorithm.
*/
static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
struct bfq_data *bfqd = data->q->elevator->elevator_data;
struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(op)) : NULL;
struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(opf)) : NULL;
int depth;
unsigned limit = data->q->nr_requests;
/* Sync reads have full depth available */
if (op_is_sync(op) && !op_is_write(op)) {
if (op_is_sync(opf) && !op_is_write(opf)) {
depth = 0;
} else {
depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
limit = (limit * depth) >> bfqd->full_depth_shift;
}
@ -693,7 +693,7 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
depth = 1;
bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
__func__, bfqd->wr_busy_queues, op_is_sync(op), depth);
__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
if (depth)
data->shallow_depth = depth;
}
@ -6104,7 +6104,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
static void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
unsigned int cmd_flags)
blk_opf_t cmd_flags)
{
if (!bfqq)
return;
@ -6129,7 +6129,7 @@ static void bfq_update_insert_stats(struct request_queue *q,
static inline void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
unsigned int cmd_flags) {}
blk_opf_t cmd_flags) {}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
static struct bfq_queue *bfq_init_rq(struct request *rq);
@ -6141,7 +6141,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq;
bool idle_timer_disabled = false;
unsigned int cmd_flags;
blk_opf_t cmd_flags;
LIST_HEAD(free);
#ifdef CONFIG_BFQ_GROUP_IOSCHED

block/bfq-iosched.h

@ -994,11 +994,11 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
blk_opf_t opf);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, unsigned int op);
u64 io_start_time_ns, blk_opf_t opf);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);

block/bfq-wf2q.c

@ -1360,6 +1360,8 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
/**
* __bfq_lookup_next_entity - return the first eligible entity in @st.
* @st: the service tree.
* @in_service: whether or not there is an in-service entity for the sched_data
* this active tree belongs to.
*
* If there is no in-service entity for the sched_data st belongs to,
* then return the entity that will be set in service if:
@ -1472,9 +1474,6 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
break;
}
if (!entity)
return NULL;
return entity;
}

block/bio.c

@ -239,7 +239,7 @@ static void bio_free(struct bio *bio)
* when IO has completed, or when the bio is released.
*/
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, unsigned int opf)
unsigned short max_vecs, blk_opf_t opf)
{
bio->bi_next = NULL;
bio->bi_bdev = bdev;
@ -292,7 +292,7 @@ EXPORT_SYMBOL(bio_init);
* preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio.
*/
void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
@ -341,7 +341,7 @@ void bio_chain(struct bio *bio, struct bio *parent)
EXPORT_SYMBOL(bio_chain);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, unsigned int opf, gfp_t gfp)
unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
@ -409,7 +409,7 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
}
static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
struct bio_set *bs)
{
struct bio_alloc_cache *cache;
@ -468,7 +468,7 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
* Returns: Pointer to new bio on success, NULL on failure.
*/
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
unsigned int opf, gfp_t gfp_mask,
blk_opf_t opf, gfp_t gfp_mask,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
@ -1033,7 +1033,7 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page,
if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
return 0;
if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
return 0;
return bio_add_hw_page(q, bio, page, len, offset,
@ -1159,6 +1159,37 @@ static void bio_put_pages(struct page **pages, size_t size, size_t off)
put_page(pages[i]);
}
static int bio_iov_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
bool same_page = false;
if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
if (WARN_ON_ONCE(bio_full(bio, len)))
return -EINVAL;
__bio_add_page(bio, page, len, offset);
return 0;
}
if (same_page)
put_page(page);
return 0;
}
static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
bool same_page = false;
if (bio_add_hw_page(q, bio, page, len, offset,
queue_max_zone_append_sectors(q), &same_page) != len)
return -EINVAL;
if (same_page)
put_page(page);
return 0;
}
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@ -1177,61 +1208,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
bool same_page = false;
ssize_t size, left;
unsigned len, i;
size_t offset;
/*
* Move page array up in the allocated memory for the bio vecs as far as
* possible so that we can start filling biovecs from the beginning
* without overwriting the temporary page array.
*/
BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
for (left = size, i = 0; left > 0; left -= len, i++) {
struct page *page = pages[i];
len = min_t(size_t, PAGE_SIZE - offset, left);
if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
if (same_page)
put_page(page);
} else {
if (WARN_ON_ONCE(bio_full(bio, len))) {
bio_put_pages(pages + i, left, offset);
return -EINVAL;
}
__bio_add_page(bio, page, len, offset);
}
offset = 0;
}
iov_iter_advance(iter, size);
return 0;
}
static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
ssize_t size, left;
unsigned len, i;
size_t offset;
int ret = 0;
if (WARN_ON_ONCE(!max_append_sectors))
return 0;
/*
* Move page array up in the allocated memory for the bio vecs as far as
* possible so that we can start filling biovecs from the beginning
@ -1240,28 +1220,39 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
/*
* Each segment in the iov is required to be a block size multiple.
* However, we may not be able to get the entire segment if it spans
* more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
* result to ensure the bio's total size is correct. The remainder of
* the iov data will be picked up in the next bio iteration.
*/
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (size > 0)
size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
if (unlikely(size <= 0))
return size ? size : -EFAULT;
for (left = size, i = 0; left > 0; left -= len, i++) {
struct page *page = pages[i];
bool same_page = false;
int ret;
len = min_t(size_t, PAGE_SIZE - offset, left);
if (bio_add_hw_page(q, bio, page, len, offset,
max_append_sectors, &same_page) != len) {
if (bio_op(bio) == REQ_OP_ZONE_APPEND)
ret = bio_iov_add_zone_append_page(bio, page, len,
offset);
else
ret = bio_iov_add_page(bio, page, len, offset);
if (ret) {
bio_put_pages(pages + i, left, offset);
ret = -EINVAL;
break;
return ret;
}
if (same_page)
put_page(page);
offset = 0;
}
iov_iter_advance(iter, size - left);
return ret;
iov_iter_advance(iter, size);
return 0;
}
/**
@ -1298,10 +1289,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
}
do {
if (bio_op(bio) == REQ_OP_ZONE_APPEND)
ret = __bio_iov_append_get_pages(bio, iter);
else
ret = __bio_iov_iter_get_pages(bio, iter);
ret = __bio_iov_iter_get_pages(bio, iter);
} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
/* don't account direct I/O as memory stall */

block/blk-cgroup-rwstat.h

@ -59,20 +59,20 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
* caller is responsible for synchronizing calls to this function.
*/
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
unsigned int op, uint64_t val)
blk_opf_t opf, uint64_t val)
{
struct percpu_counter *cnt;
if (op_is_discard(op))
if (op_is_discard(opf))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
else if (op_is_write(op))
else if (op_is_write(opf))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
if (op_is_sync(op))
if (op_is_sync(opf))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

block/blk-cgroup.c

@ -846,6 +846,21 @@ static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
}
}
static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
struct blkg_iostat *last)
{
struct blkg_iostat delta;
unsigned long flags;
/* propagate percpu delta to global */
flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&delta, cur);
blkg_iostat_sub(&delta, last);
blkg_iostat_add(&blkg->iostat.cur, &delta);
blkg_iostat_add(last, &delta);
u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
struct blkcg *blkcg = css_to_blkcg(css);
@ -860,8 +875,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
struct blkcg_gq *parent = blkg->parent;
struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
struct blkg_iostat cur, delta;
unsigned long flags;
struct blkg_iostat cur;
unsigned int seq;
/* fetch the current per-cpu values */
@ -870,23 +884,12 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
blkg_iostat_set(&cur, &bisc->cur);
} while (u64_stats_fetch_retry(&bisc->sync, seq));
/* propagate percpu delta to global */
flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&delta, &cur);
blkg_iostat_sub(&delta, &bisc->last);
blkg_iostat_add(&blkg->iostat.cur, &delta);
blkg_iostat_add(&bisc->last, &delta);
u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
blkcg_iostat_update(blkg, &cur, &bisc->last);
/* propagate global delta to parent (unless that's root) */
if (parent && parent->parent) {
flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
blkg_iostat_set(&delta, &blkg->iostat.cur);
blkg_iostat_sub(&delta, &blkg->iostat.last);
blkg_iostat_add(&parent->iostat.cur, &delta);
blkg_iostat_add(&blkg->iostat.last, &delta);
u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
}
if (parent && parent->parent)
blkcg_iostat_update(parent, &blkg->iostat.cur,
&blkg->iostat.last);
}
rcu_read_unlock();
@ -1299,6 +1302,7 @@ int blkcg_init_queue(struct request_queue *q)
ret = blk_iolatency_init(q);
if (ret) {
blk_throtl_exit(q);
blk_ioprio_exit(q);
goto err_destroy_all;
}
@ -1529,6 +1533,18 @@ void blkcg_deactivate_policy(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
struct blkcg *blkcg;
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
blkcg->cpd[pol->plid] = NULL;
}
}
}
/**
* blkcg_policy_register - register a blkcg policy
* @pol: blkcg policy to register
@ -1593,14 +1609,9 @@ int blkcg_policy_register(struct blkcg_policy *pol)
return 0;
err_free_cpds:
if (pol->cpd_free_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
blkcg->cpd[pol->plid] = NULL;
}
}
}
if (pol->cpd_free_fn)
blkcg_free_all_cpd(pol);
blkcg_policy[pol->plid] = NULL;
err_unlock:
mutex_unlock(&blkcg_pol_mutex);
@ -1617,8 +1628,6 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
*/
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
struct blkcg *blkcg;
mutex_lock(&blkcg_pol_register_mutex);
if (WARN_ON(blkcg_policy[pol->plid] != pol))
@ -1633,14 +1642,9 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
/* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
if (pol->cpd_free_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
blkcg->cpd[pol->plid] = NULL;
}
}
}
if (pol->cpd_free_fn)
blkcg_free_all_cpd(pol);
blkcg_policy[pol->plid] = NULL;
mutex_unlock(&blkcg_pol_mutex);
@ -1696,7 +1700,7 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
* everybody is happy with their IO latencies.
*/
if (time_before64(old + NSEC_PER_SEC, now) &&
atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
u64 cur = atomic64_read(&blkg->delay_nsec);
u64 sub = min_t(u64, blkg->last_delay, now - old);
int cur_use = atomic_read(&blkg->use_delay);
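
The blkcg_scale_delay() hunk above is typical of the try_cmpxchg()
conversions in this pull: on failure, try_cmpxchg() writes the observed
value back through its 'old' pointer, so the manual read-compare-reload
loop disappears. A small sketch of the pattern, assuming the documented
atomic_try_cmpxchg() semantics (function names are illustrative):

#include <linux/atomic.h>

static void inc_capped_old(atomic_t *v, int cap)
{
	int old = atomic_read(v);

	while (old < cap) {
		int cur = atomic_cmpxchg(v, old, old + 1);
		if (cur == old)
			break;
		old = cur;	/* lost the race; retry with the fresh value */
	}
}

static void inc_capped_new(atomic_t *v, int cap)
{
	int old = atomic_read(v);

	/* try_cmpxchg() refreshes 'old' on failure, no manual reload */
	while (old < cap && !atomic_try_cmpxchg(v, &old, old + 1))
		;
}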

block/blk-cgroup.h

@ -430,12 +430,8 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
* then check to see if we were the last delay so we can drop the
* congestion count on the cgroup.
*/
while (old) {
int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
if (cur == old)
break;
old = cur;
}
while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
;
if (old == 0)
return 0;
@ -458,7 +454,7 @@ static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
int old = atomic_read(&blkg->use_delay);
/* We only want 1 person setting the congestion count for this blkg. */
if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
atomic64_set(&blkg->delay_nsec, delay);
@ -475,7 +471,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
int old = atomic_read(&blkg->use_delay);
/* We only want 1 person clearing the congestion count for this blkg. */
if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

block/blk-core.c

@ -136,7 +136,7 @@ static const char *const blk_op_name[] = {
* string format. Useful in the debugging and tracing bio or request. For
* invalid REQ_OP_XXX it returns string "UNKNOWN".
*/
inline const char *blk_op_str(unsigned int op)
inline const char *blk_op_str(enum req_op op)
{
const char *op_str = "UNKNOWN";
@ -284,49 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
* Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
* put it. All future requests will be failed immediately with -ENODEV.
*
* Context: can sleep
*/
void blk_cleanup_queue(struct request_queue *q)
{
/* cannot be called from atomic context */
might_sleep();
WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that blk_mq_run_hw_queues() accesses the hardware queues
* after draining finished.
*/
blk_freeze_queue(q);
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
blk_sync_queue(q);
if (queue_is_mq(q)) {
blk_mq_cancel_work_sync(q);
blk_mq_exit_queue(q);
}
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
@ -435,7 +392,7 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
q->last_merge = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
if (q->id < 0)
goto fail_srcu;
@ -485,7 +442,7 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
fail_split:
bioset_exit(&q->bio_split);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
ida_free(&blk_queue_ida, q->id);
fail_srcu:
if (alloc_srcu)
cleanup_srcu_struct(q->srcu);
@ -504,12 +461,10 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
*/
bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
return false;
if (unlikely(blk_queue_dying(q)))
return false;
kobject_get(&q->kobj);
return true;
}
EXPORT_SYMBOL(blk_get_queue);
@ -608,16 +563,15 @@ static int blk_partition_remap(struct bio *bio)
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
struct bio *bio)
{
sector_t pos = bio->bi_iter.bi_sector;
int nr_sectors = bio_sectors(bio);
/* Only applicable to zoned block devices */
if (!blk_queue_is_zoned(q))
if (!bdev_is_zoned(bio->bi_bdev))
return BLK_STS_NOTSUPP;
/* The bio sector must point to the start of a sequential zone */
if (pos & (blk_queue_zone_sectors(q) - 1) ||
!blk_queue_zone_is_seq(q, pos))
if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
!bio_zone_is_seq(bio))
return BLK_STS_IOERR;
/*
@ -762,7 +716,7 @@ void submit_bio_noacct(struct bio *bio)
might_sleep();
plug = blk_mq_plug(q, bio);
plug = blk_mq_plug(bio);
if (plug && plug->nowait)
bio->bi_opf |= REQ_NOWAIT;
@ -818,11 +772,11 @@ void submit_bio_noacct(struct bio *bio)
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
if (!blk_queue_is_zoned(q))
if (!bdev_is_zoned(bio->bi_bdev))
goto not_supported;
break;
case REQ_OP_ZONE_RESET_ALL:
if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
goto not_supported;
break;
case REQ_OP_WRITE_ZEROES:
@ -987,7 +941,7 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
again:
stamp = READ_ONCE(part->bd_stamp);
if (unlikely(time_after(now, stamp))) {
if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
__part_stat_add(part, io_ticks, end ? now - stamp : 1);
}
if (part->bd_partno) {
@ -997,7 +951,7 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
}
unsigned long bdev_start_io_acct(struct block_device *bdev,
unsigned int sectors, unsigned int op,
unsigned int sectors, enum req_op op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);
@ -1038,7 +992,7 @@ unsigned long bio_start_io_acct(struct bio *bio)
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);
@ -1247,7 +1201,7 @@ EXPORT_SYMBOL_GPL(blk_io_schedule);
int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *

block/blk-flush.c

@ -94,7 +94,7 @@ enum {
};
static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, unsigned int flags);
struct blk_flush_queue *fq, blk_opf_t flags);
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
@ -173,7 +173,7 @@ static void blk_flush_complete_seq(struct request *rq,
{
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
unsigned int cmd_flags;
blk_opf_t cmd_flags;
BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
@ -290,7 +290,7 @@ bool is_flush_rq(struct request *rq)
*
*/
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
unsigned int flags)
blk_opf_t flags)
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
struct request *first_rq =

block/blk-ia-ranges.c

@ -102,31 +102,18 @@ static struct kobj_type blk_ia_ranges_ktype = {
* disk_register_independent_access_ranges - register with sysfs a set of
* independent access ranges
* @disk: Target disk
* @new_iars: New set of independent access ranges
*
* Register with sysfs a set of independent access ranges for @disk.
* If @new_iars is not NULL, this set of ranges is registered and the old set
* specified by q->ia_ranges is unregistered. Otherwise, q->ia_ranges is
* registered if it is not already.
*/
int disk_register_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *new_iars)
int disk_register_independent_access_ranges(struct gendisk *disk)
{
struct blk_independent_access_ranges *iars = disk->ia_ranges;
struct request_queue *q = disk->queue;
struct blk_independent_access_ranges *iars;
int i, ret;
lockdep_assert_held(&q->sysfs_dir_lock);
lockdep_assert_held(&q->sysfs_lock);
/* If a new range set is specified, unregister the old one */
if (new_iars) {
if (q->ia_ranges)
disk_unregister_independent_access_ranges(disk);
q->ia_ranges = new_iars;
}
iars = q->ia_ranges;
if (!iars)
return 0;
@ -138,7 +125,7 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
&q->kobj, "%s", "independent_access_ranges");
if (ret) {
q->ia_ranges = NULL;
disk->ia_ranges = NULL;
kobject_put(&iars->kobj);
return ret;
}
@ -164,7 +151,7 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blk_independent_access_ranges *iars = q->ia_ranges;
struct blk_independent_access_ranges *iars = disk->ia_ranges;
int i;
lockdep_assert_held(&q->sysfs_dir_lock);
@ -182,7 +169,7 @@ void disk_unregister_independent_access_ranges(struct gendisk *disk)
kfree(iars);
}
q->ia_ranges = NULL;
disk->ia_ranges = NULL;
}
static struct blk_independent_access_range *
@ -210,6 +197,9 @@ static bool disk_check_ia_ranges(struct gendisk *disk,
sector_t sector = 0;
int i;
if (WARN_ON_ONCE(!iars->nr_ia_ranges))
return false;
/*
* While sorting the ranges in increasing LBA order, check that the
* ranges do not overlap, that there are no sector holes and that all
@ -242,7 +232,7 @@ static bool disk_check_ia_ranges(struct gendisk *disk,
static bool disk_ia_ranges_changed(struct gendisk *disk,
struct blk_independent_access_ranges *new)
{
struct blk_independent_access_ranges *old = disk->queue->ia_ranges;
struct blk_independent_access_ranges *old = disk->ia_ranges;
int i;
if (!old)
@ -298,25 +288,15 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
{
struct request_queue *q = disk->queue;
if (WARN_ON_ONCE(iars && !iars->nr_ia_ranges)) {
mutex_lock(&q->sysfs_dir_lock);
mutex_lock(&q->sysfs_lock);
if (iars && !disk_check_ia_ranges(disk, iars)) {
kfree(iars);
iars = NULL;
}
mutex_lock(&q->sysfs_dir_lock);
mutex_lock(&q->sysfs_lock);
if (iars) {
if (!disk_check_ia_ranges(disk, iars)) {
kfree(iars);
iars = NULL;
goto reg;
}
if (!disk_ia_ranges_changed(disk, iars)) {
kfree(iars);
goto unlock;
}
if (iars && !disk_ia_ranges_changed(disk, iars)) {
kfree(iars);
goto unlock;
}
/*
@ -324,17 +304,12 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
* revalidation. If that is the case, we need to unregister the old
* set of independent access ranges and register the new set. If the
* queue is not registered, registration of the device request queue
* will register the independent access ranges, so only swap in the
* new set and free the old one.
* will register the independent access ranges.
*/
reg:
if (blk_queue_registered(q)) {
disk_register_independent_access_ranges(disk, iars);
} else {
swap(q->ia_ranges, iars);
kfree(iars);
}
disk_unregister_independent_access_ranges(disk);
disk->ia_ranges = iars;
if (blk_queue_registered(q))
disk_register_independent_access_ranges(disk);
unlock:
mutex_unlock(&q->sysfs_lock);
mutex_unlock(&q->sysfs_dir_lock);

block/blk-ioc.c

@ -247,6 +247,8 @@ static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
INIT_HLIST_HEAD(&ioc->icq_list);
INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
ioc->ioprio = IOPRIO_DEFAULT;
return ioc;
}

block/blk-iocost.c

@ -2769,7 +2769,7 @@ static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
return;
switch (req_op(rq) & REQ_OP_MASK) {
switch (req_op(rq)) {
case REQ_OP_READ:
pidx = QOS_RLAT;
rw = READ;
@ -2886,15 +2886,21 @@ static int blk_iocost_init(struct request_queue *q)
* called before policy activation completion, can't assume that the
* target bio has an iocg associated and need to test for NULL iocg.
*/
rq_qos_add(q, rqos);
ret = rq_qos_add(q, rqos);
if (ret)
goto err_free_ioc;
ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
if (ret) {
rq_qos_del(q, rqos);
free_percpu(ioc->pcpu_stat);
kfree(ioc);
return ret;
}
if (ret)
goto err_del_qos;
return 0;
err_del_qos:
rq_qos_del(q, rqos);
err_free_ioc:
free_percpu(ioc->pcpu_stat);
kfree(ioc);
return ret;
}
static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)

block/blk-iolatency.c

@ -401,7 +401,6 @@ static void check_scale_change(struct iolatency_grp *iolat)
unsigned int cur_cookie;
unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
u64 scale_lat;
unsigned int old;
int direction = 0;
if (lat_to_blkg(iolat)->parent == NULL)
@ -422,11 +421,10 @@ static void check_scale_change(struct iolatency_grp *iolat)
else
return;
old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
/* Somebody beat us to the punch, just bail. */
if (old != our_cookie)
if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) {
/* Somebody beat us to the punch, just bail. */
return;
}
if (direction < 0 && iolat->min_lat_nsec) {
u64 samples_thresh;
@ -633,8 +631,8 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
window_start = atomic64_read(&iolat->window_start);
if (now > window_start &&
(now - window_start) >= iolat->cur_win_nsec) {
if (atomic64_cmpxchg(&iolat->window_start,
window_start, now) == window_start)
if (atomic64_try_cmpxchg(&iolat->window_start,
&window_start, now))
iolatency_check_latencies(iolat, now);
}
}
@ -773,19 +771,23 @@ int blk_iolatency_init(struct request_queue *q)
rqos->ops = &blkcg_iolatency_ops;
rqos->q = q;
rq_qos_add(q, rqos);
ret = rq_qos_add(q, rqos);
if (ret)
goto err_free;
ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
if (ret) {
rq_qos_del(q, rqos);
kfree(blkiolat);
return ret;
}
if (ret)
goto err_qos_del;
timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
return 0;
err_qos_del:
rq_qos_del(q, rqos);
err_free:
kfree(blkiolat);
return ret;
}
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)

block/blk-ioprio.c

@ -62,7 +62,6 @@ struct ioprio_blkg {
struct ioprio_blkcg {
struct blkcg_policy_data cpd;
enum prio_policy prio_policy;
bool prio_set;
};
static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd)
@ -113,7 +112,6 @@ static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf,
if (ret < 0)
return ret;
blkcg->prio_policy = ret;
blkcg->prio_set = true;
return nbytes;
}
@ -183,26 +181,20 @@ static struct blkcg_policy ioprio_policy = {
.pd_free_fn = ioprio_free_pd,
};
struct blk_ioprio {
struct rq_qos rqos;
};
static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
struct bio *bio)
void blkcg_set_ioprio(struct bio *bio)
{
struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);
u16 prio;
if (!blkcg->prio_set)
if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE)
return;
/*
* Except for IOPRIO_CLASS_NONE, higher I/O priority numbers
* correspond to a lower priority. Hence, the max_t() below selects
* the lower priority of bi_ioprio and the cgroup I/O priority class.
* If the cgroup policy has been set to POLICY_NO_CHANGE == 0, the
* bio I/O priority is not modified. If the bio I/O priority equals
* IOPRIO_CLASS_NONE, the cgroup I/O priority is assigned to the bio.
* If the bio I/O priority equals IOPRIO_CLASS_NONE, the cgroup I/O
* priority is assigned to the bio.
*/
prio = max_t(u16, bio->bi_ioprio,
IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0));
@ -210,49 +202,14 @@ static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
bio->bi_ioprio = prio;
}
static void blkcg_ioprio_exit(struct rq_qos *rqos)
void blk_ioprio_exit(struct request_queue *q)
{
struct blk_ioprio *blkioprio_blkg =
container_of(rqos, typeof(*blkioprio_blkg), rqos);
blkcg_deactivate_policy(rqos->q, &ioprio_policy);
kfree(blkioprio_blkg);
blkcg_deactivate_policy(q, &ioprio_policy);
}
static struct rq_qos_ops blkcg_ioprio_ops = {
.track = blkcg_ioprio_track,
.exit = blkcg_ioprio_exit,
};
int blk_ioprio_init(struct request_queue *q)
{
struct blk_ioprio *blkioprio_blkg;
struct rq_qos *rqos;
int ret;
blkioprio_blkg = kzalloc(sizeof(*blkioprio_blkg), GFP_KERNEL);
if (!blkioprio_blkg)
return -ENOMEM;
ret = blkcg_activate_policy(q, &ioprio_policy);
if (ret) {
kfree(blkioprio_blkg);
return ret;
}
rqos = &blkioprio_blkg->rqos;
rqos->id = RQ_QOS_IOPRIO;
rqos->ops = &blkcg_ioprio_ops;
rqos->q = q;
/*
* Registering the rq-qos policy after activating the blk-cgroup
* policy guarantees that ioprio_blkcg_from_bio(bio) != NULL in the
* rq-qos callbacks.
*/
rq_qos_add(q, rqos);
return 0;
return blkcg_activate_policy(q, &ioprio_policy);
}
static int __init ioprio_init(void)

block/blk-ioprio.h

@ -6,14 +6,23 @@
#include <linux/kconfig.h>
struct request_queue;
struct bio;
#ifdef CONFIG_BLK_CGROUP_IOPRIO
int blk_ioprio_init(struct request_queue *q);
void blk_ioprio_exit(struct request_queue *q);
void blkcg_set_ioprio(struct bio *bio);
#else
static inline int blk_ioprio_init(struct request_queue *q)
{
return 0;
}
static inline void blk_ioprio_exit(struct request_queue *q)
{
}
static inline void blkcg_set_ioprio(struct bio *bio)
{
}
#endif
#endif /* _BLK_IOPRIO_H_ */

block/blk-lib.c

@ -48,10 +48,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* In case the discard granularity isn't set by buggy device driver */
if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
char dev_name[BDEVNAME_SIZE];
bdevname(bdev, dev_name);
pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
bdev);
return -EOPNOTSUPP;
}

block/blk-merge.c

@ -164,18 +164,21 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
static inline unsigned get_max_io_size(struct request_queue *q,
struct bio *bio)
{
unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
unsigned max_sectors = sectors;
unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
unsigned max_sectors = queue_max_sectors(q), start, end;
max_sectors += start_offset;
max_sectors &= ~(pbs - 1);
if (max_sectors > start_offset)
return max_sectors - start_offset;
if (q->limits.chunk_sectors) {
max_sectors = min(max_sectors,
blk_chunk_sectors_left(bio->bi_iter.bi_sector,
q->limits.chunk_sectors));
}
return sectors & ~(lbs - 1);
start = bio->bi_iter.bi_sector & (pbs - 1);
end = (start + max_sectors) & ~(pbs - 1);
if (end > start)
return end - start;
return max_sectors & ~(lbs - 1);
}
static inline unsigned get_max_segment_size(const struct request_queue *q,
@ -201,11 +204,11 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
* @nsegs: [in,out] Number of segments in the bio being built. Incremented
* by the number of segments from @bv that may be appended to that
* bio without exceeding @max_segs
* @sectors: [in,out] Number of sectors in the bio being built. Incremented
* by the number of sectors from @bv that may be appended to that
* bio without exceeding @max_sectors
* @bytes: [in,out] Number of bytes in the bio being built. Incremented
* by the number of bytes from @bv that may be appended to that
* bio without exceeding @max_bytes
* @max_segs: [in] upper bound for *@nsegs
* @max_sectors: [in] upper bound for *@sectors
* @max_bytes: [in] upper bound for *@bytes
*
* When splitting a bio, it can happen that a bvec is encountered that is too
* big to fit in a single segment and hence that it has to be split in the
@ -216,10 +219,10 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
*/
static bool bvec_split_segs(const struct request_queue *q,
const struct bio_vec *bv, unsigned *nsegs,
unsigned *sectors, unsigned max_segs,
unsigned max_sectors)
unsigned *bytes, unsigned max_segs,
unsigned max_bytes)
{
unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
unsigned len = min(bv->bv_len, max_len);
unsigned total_len = 0;
unsigned seg_size = 0;
@ -237,7 +240,7 @@ static bool bvec_split_segs(const struct request_queue *q,
break;
}
*sectors += total_len >> 9;
*bytes += total_len;
/* tell the caller to split the bvec if it is too big to fit */
return len > 0 || bv->bv_len > max_len;
@ -269,8 +272,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned nsegs = 0, sectors = 0;
const unsigned max_sectors = get_max_io_size(q, bio);
unsigned nsegs = 0, bytes = 0;
const unsigned max_bytes = get_max_io_size(q, bio) << 9;
const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
@ -282,12 +285,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
goto split;
if (nsegs < max_segs &&
sectors + (bv.bv_len >> 9) <= max_sectors &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
sectors += bv.bv_len >> 9;
} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
max_sectors)) {
bytes += bv.bv_len;
} else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
max_bytes)) {
goto split;
}
@ -300,13 +303,20 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
split:
*segs = nsegs;
/*
* Individual bvecs might not be logical block aligned. Round down the
* split size so that each bio is properly block size aligned, even if
* we do not use the full hardware limits.
*/
bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
/*
* Bio splitting may cause subtle trouble such as hang when doing sync
* iopoll in direct IO routine. Given performance gain of iopoll for
* big IO can be trival, disable iopoll when split needed.
*/
bio_clear_polled(bio);
return bio_split(bio, sectors, GFP_NOIO, bs);
return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}
/**
@ -376,7 +386,7 @@ EXPORT_SYMBOL(blk_queue_split);
unsigned int blk_recalc_rq_segments(struct request *rq)
{
unsigned int nr_phys_segs = 0;
unsigned int nr_sectors = 0;
unsigned int bytes = 0;
struct req_iterator iter;
struct bio_vec bv;
@ -396,10 +406,12 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
return 1;
case REQ_OP_WRITE_ZEROES:
return 0;
default:
break;
}
rq_for_each_bvec(bv, rq, iter)
bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
UINT_MAX, UINT_MAX);
return nr_phys_segs;
}
@ -560,17 +572,18 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
sector_t offset)
{
struct request_queue *q = rq->q;
unsigned int max_sectors;
if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
if (!q->limits.chunk_sectors ||
req_op(rq) == REQ_OP_DISCARD ||
req_op(rq) == REQ_OP_SECURE_ERASE)
return blk_queue_get_max_sectors(q, req_op(rq));
return min(blk_max_size_offset(q, offset, 0),
blk_queue_get_max_sectors(q, req_op(rq)));
return max_sectors;
return min(max_sectors,
blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
@ -700,7 +713,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
*/
void blk_rq_set_mixed_merge(struct request *rq)
{
unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;
if (rq->rq_flags & RQF_MIXED_MERGE)
@ -916,7 +929,7 @@ enum bio_merge_status {
static enum bio_merge_status bio_attempt_back_merge(struct request *req,
struct bio *bio, unsigned int nr_segs)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
@ -940,7 +953,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
static enum bio_merge_status bio_attempt_front_merge(struct request *req,
struct bio *bio, unsigned int nr_segs)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
@ -1041,7 +1054,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
struct blk_plug *plug;
struct request *rq;
plug = blk_mq_plug(q, bio);
plug = blk_mq_plug(bio);
if (!plug || rq_list_empty(plug->mq_list))
return false;
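
The split and merge hunks above route chunked-device limits through a
blk_chunk_sectors_left() helper. Judging from the call sites, it returns
how many sectors remain before the next chunk boundary after a given
offset; a sketch consistent with that usage (the real helper lives in
the blkdev headers and may differ in detail):

#include <linux/blkdev.h>
#include <linux/log2.h>

static inline unsigned int chunk_sectors_left(sector_t offset,
					      unsigned int chunk_sectors)
{
	/* chunk_sectors is no longer required to be a power of two */
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}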

block/blk-mq-debugfs-zoned.c

@ -11,11 +11,11 @@ int queue_zone_wlock_show(void *data, struct seq_file *m)
struct request_queue *q = data;
unsigned int i;
if (!q->seq_zones_wlock)
if (!q->disk->seq_zones_wlock)
return 0;
for (i = 0; i < q->nr_zones; i++)
if (test_bit(i, q->seq_zones_wlock))
for (i = 0; i < q->disk->nr_zones; i++)
if (test_bit(i, q->disk->seq_zones_wlock))
seq_printf(m, "%u\n", i);
return 0;

block/blk-mq-debugfs.c

@ -116,7 +116,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(NOXMERGES),
QUEUE_FLAG_NAME(ADD_RANDOM),
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(DEAD),
QUEUE_FLAG_NAME(INIT_DONE),
QUEUE_FLAG_NAME(STABLE_WRITES),
QUEUE_FLAG_NAME(POLL),
@ -151,11 +150,10 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
char opbuf[16] = { }, *op;
/*
* The "state" attribute is removed after blk_cleanup_queue() has called
* blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
* triggering a use-after-free.
* The "state" attribute is removed when the queue is removed. Don't
* allow setting the state on a dying queue to avoid a use-after-free.
*/
if (blk_queue_dead(q))
if (blk_queue_dying(q))
return -ENOENT;
if (count >= sizeof(opbuf)) {
@ -306,7 +304,7 @@ static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
const unsigned int op = req_op(rq);
const enum req_op op = req_op(rq);
const char *op_str = blk_op_str(op);
seq_printf(m, "%p {.op=", rq);
@ -315,8 +313,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
else
seq_printf(m, "%s", op_str);
seq_puts(m, ", .cmd_flags=");
blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
ARRAY_SIZE(cmd_flag_name));
blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
seq_puts(m, ", .rq_flags=");
blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
ARRAY_SIZE(rqf_name));
@ -377,7 +375,7 @@ struct show_busy_params {
* e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
* keep iterating requests.
*/
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
const struct show_busy_params *params = data;
@ -730,6 +728,9 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q,
char name[20];
int i;
if (!q->debugfs_dir)
return;
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

block/blk-mq-sysfs.c

@ -203,23 +203,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;
lockdep_assert_held(&q->sysfs_dir_lock);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
}
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
@ -252,16 +235,16 @@ void blk_mq_sysfs_init(struct request_queue *q)
}
}
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
int blk_mq_sysfs_register(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
unsigned long i, j;
int ret;
WARN_ON_ONCE(!q->kobj.parent);
lockdep_assert_held(&q->sysfs_dir_lock);
ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
if (ret < 0)
goto out;
@ -286,11 +269,27 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
return ret;
}
void blk_mq_sysfs_unregister(struct request_queue *q)
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
unsigned long i;
lockdep_assert_held(&q->sysfs_dir_lock);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
q->mq_sysfs_init_done = false;
}
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;
@ -306,7 +305,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register(struct request_queue *q)
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;

block/blk-mq-tag.c

@ -37,29 +37,25 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
* to get tag when first time, the other shared-tag users could reserve
* budget for it.
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
unsigned int users;
if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue;
if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
return true;
}
if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return;
set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
} else {
if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
return true;
}
if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return;
set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
}
users = atomic_inc_return(&hctx->tags->active_queues);
blk_mq_update_wake_batch(hctx->tags, users);
return true;
}
/*
@ -266,7 +262,6 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_hw_ctx *hctx = iter_data->hctx;
struct request_queue *q = iter_data->q;
struct blk_mq_tag_set *set = q->tag_set;
bool reserved = iter_data->reserved;
struct blk_mq_tags *tags;
struct request *rq;
bool ret = true;
@ -276,7 +271,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
else
tags = hctx->tags;
if (!reserved)
if (!iter_data->reserved)
bitnr += tags->nr_reserved_tags;
/*
* We can hit rq == NULL here, because the tagging functions
@ -287,7 +282,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
return true;
if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
ret = iter_data->fn(rq, iter_data->data, reserved);
ret = iter_data->fn(rq, iter_data->data);
blk_mq_put_rq_ref(rq);
return ret;
}
@ -337,12 +332,11 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_tags_iter_data *iter_data = data;
struct blk_mq_tags *tags = iter_data->tags;
bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
struct request *rq;
bool ret = true;
bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
if (!reserved)
if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
bitnr += tags->nr_reserved_tags;
/*
@ -358,7 +352,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
blk_mq_request_started(rq))
ret = iter_data->fn(rq, iter_data->data, reserved);
ret = iter_data->fn(rq, iter_data->data);
if (!iter_static_rqs)
blk_mq_put_rq_ref(rq);
return ret;
@ -448,8 +442,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
void *data, bool reserved)
static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
unsigned *count = data;


@ -47,15 +47,13 @@ enum {
BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
};
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
return false;
return __blk_mq_tag_busy(hctx);
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_tag_busy(hctx);
}
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)


@ -42,6 +42,7 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
@ -128,8 +129,7 @@ struct mq_inflight {
unsigned int inflight[2];
};
static bool blk_mq_check_inflight(struct request *rq, void *priv,
bool reserved)
static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
struct mq_inflight *mi = priv;
@ -474,6 +474,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
if (!(data->rq_flags & RQF_ELV))
blk_mq_tag_busy(data->hctx);
if (data->flags & BLK_MQ_REQ_RESERVED)
data->rq_flags |= RQF_RESV;
/*
* Try batched alloc if we want more than 1 tag.
*/
@ -507,13 +510,13 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
alloc_time_ns);
}
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
.cmd_flags = op,
.cmd_flags = opf,
.nr_tags = 1,
};
struct request *rq;
@ -537,12 +540,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
EXPORT_SYMBOL(blk_mq_alloc_request);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
.cmd_flags = op,
.cmd_flags = opf,
.nr_tags = 1,
};
u64 alloc_time_ns = 0;
@ -588,6 +591,9 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
else
data.rq_flags |= RQF_ELV;
if (flags & BLK_MQ_REQ_RESERVED)
data.rq_flags |= RQF_RESV;
ret = -EWOULDBLOCK;
tag = blk_mq_get_tag(&data);
if (tag == BLK_MQ_NO_TAG)
@ -654,7 +660,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{
printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
rq->q->disk ? rq->q->disk->disk_name : "?",
(unsigned long long) rq->cmd_flags);
(__force unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
@ -707,8 +713,9 @@ static void blk_print_req_error(struct request *req, blk_status_t status)
"phys_seg %u prio class %u\n",
blk_status_to_str(status),
req->q->disk ? req->q->disk->disk_name : "?",
blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
req->cmd_flags & ~REQ_OP_MASK,
blk_rq_pos(req), (__force u32)req_op(req),
blk_op_str(req_op(req)),
(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
req->nr_phys_segments,
IOPRIO_PRIO_CLASS(req->ioprio));
}
@ -1393,8 +1400,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
static bool blk_mq_rq_inflight(struct request *rq, void *priv,
bool reserved)
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
* If we find a request that isn't idle we know the queue is busy
@ -1420,13 +1426,13 @@ bool blk_mq_queue_inflight(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
static void blk_mq_rq_timed_out(struct request *req)
{
req->rq_flags |= RQF_TIMED_OUT;
if (req->q->mq_ops->timeout) {
enum blk_eh_timer_return ret;
ret = req->q->mq_ops->timeout(req, reserved);
ret = req->q->mq_ops->timeout(req);
if (ret == BLK_EH_DONE)
return;
WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
@ -1463,7 +1469,7 @@ void blk_mq_put_rq_ref(struct request *rq)
__blk_mq_free_request(rq);
}
static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
static bool blk_mq_check_expired(struct request *rq, void *priv)
{
unsigned long *next = priv;
@ -1475,7 +1481,7 @@ static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
* from blk_mq_check_expired().
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
blk_mq_rq_timed_out(rq);
return true;
}
@ -2085,14 +2091,10 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
put_cpu();
return;
}
put_cpu();
}
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@ -2156,7 +2158,7 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
* just causes lock contention inside the scheduler and pointless cache
* bouncing.
*/
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
if (!blk_mq_hctx_stopped(hctx))
return hctx;
@ -2783,6 +2785,14 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
return rq;
}
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@ -2799,7 +2809,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
struct blk_plug *plug = blk_mq_plug(q, bio);
struct blk_plug *plug = blk_mq_plug(bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq;
unsigned int nr_segs = 1;
@ -2812,6 +2822,8 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return;
bio_set_ioprio(bio);
rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
if (!rq) {
if (!bio)
@ -3276,7 +3288,7 @@ struct rq_iter_data {
bool has_rq;
};
static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
static bool blk_mq_has_request(struct request *rq, void *data)
{
struct rq_iter_data *iter_data = data;
@ -3895,7 +3907,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
q->queuedata = queuedata;
ret = blk_mq_init_allocated_queue(set, q);
if (ret) {
blk_cleanup_queue(q);
blk_put_queue(q);
return ERR_PTR(ret);
}
return q;
@ -3907,6 +3919,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_init_queue);
/**
* blk_mq_destroy_queue - shutdown a request queue
* @q: request queue to shutdown
*
* This shuts down a request queue allocated by blk_mq_init_queue() and drops
* the initial reference. All future requests will fail with -ENODEV.
*
* Context: can sleep
*/
void blk_mq_destroy_queue(struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_mq(q));
WARN_ON_ONCE(blk_queue_registered(q));
might_sleep();
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
blk_freeze_queue(q);
blk_sync_queue(q);
blk_mq_cancel_work_sync(q);
blk_mq_exit_queue(q);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_mq_destroy_queue);
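A minimal teardown sketch under the new model, assuming a driver that allocated a standalone queue with blk_mq_init_queue() together with its own tag set (mydrv and its fields are illustrative, not part of this merge):

	static void mydrv_remove(struct mydrv *drv)
	{
		/* fail future I/O with -ENODEV, drain, and drop the initial ref */
		blk_mq_destroy_queue(drv->queue);
		/* free the tag set only after the queue is shut down */
		blk_mq_free_tag_set(&drv->tag_set);
	}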
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
struct lock_class_key *lkclass)
{
@ -3919,13 +3960,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
disk = __alloc_disk_node(q, set->numa_node, lkclass);
if (!disk) {
blk_cleanup_queue(q);
blk_mq_destroy_queue(q);
return ERR_PTR(-ENOMEM);
}
set_bit(GD_OWNS_QUEUE, &disk->state);
return disk;
}
EXPORT_SYMBOL(__blk_mq_alloc_disk);
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
struct lock_class_key *lkclass)
{
if (!blk_get_queue(q))
return NULL;
return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
}
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
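A hedged sketch of the intended caller: a driver that created and still owns the request queue attaches a gendisk to it later, and GD_OWNS_QUEUE is deliberately left clear so del_gendisk() leaves the queue usable (mydrv_fops and the error handling are illustrative):

	disk = blk_mq_alloc_disk_for_queue(q, &lkclass);
	if (!disk)
		return -ENOMEM;	/* queue dying or allocation failure */
	disk->fops = &mydrv_fops;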
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
int hctx_idx, int node)
@ -4513,7 +4564,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_debugfs_unregister_hctxs(q);
blk_mq_sysfs_unregister(q);
blk_mq_sysfs_unregister_hctxs(q);
}
prev_nr_hw_queues = set->nr_hw_queues;
@ -4544,7 +4595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
reregister:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register(q);
blk_mq_sysfs_register_hctxs(q);
blk_mq_debugfs_register_hctxs(q);
}


@ -86,16 +86,16 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
enum hctx_type type = HCTX_TYPE_DEFAULT;
/*
* The caller ensures that if REQ_POLLED is set, polling is enabled.
*/
if (flags & REQ_POLLED)
if (opf & REQ_POLLED)
type = HCTX_TYPE_POLL;
else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
type = HCTX_TYPE_READ;
return type;
}
@ -103,14 +103,14 @@ static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
/*
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
* @q: request queue
* @flags: request command flags
* @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
* @ctx: software queue cpu ctx
*/
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
unsigned int flags,
blk_opf_t opf,
struct blk_mq_ctx *ctx)
{
return ctx->hctxs[blk_mq_get_hctx_type(flags)];
return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
/*
@ -118,9 +118,10 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
*/
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
@ -151,7 +152,7 @@ struct blk_mq_alloc_data {
struct request_queue *q;
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
unsigned int cmd_flags;
blk_opf_t cmd_flags;
req_flags_t rq_flags;
/* allocate multiple requests/tags in one go */
@ -293,7 +294,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
/*
* blk_mq_plug() - Get caller context plug
* @q: request queue
* @bio : the bio being submitted by the caller context
*
* Plugging, by design, may delay the insertion of BIOs into the elevator in
@ -304,23 +304,22 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
* order. While this is not a problem with regular block devices, this ordering
* change can cause write BIO failures with zoned block devices as these
* require sequential write patterns to zones. Prevent this from happening by
* ignoring the plug state of a BIO issuing context if the target request queue
* is for a zoned block device and the BIO to plug is a write operation.
* ignoring the plug state of a BIO issuing context if it is for a zoned block
* device and the BIO to plug is a write operation.
*
* Return current->plug if the bio can be plugged and NULL otherwise
*/
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
struct bio *bio)
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
/* Zoned block device write operation case: do not plug the BIO */
if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
return NULL;
/*
* For regular block devices or read operations, use the context plug
* which may be NULL if blk_start_plug() was not executed.
*/
if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
return current->plug;
/* Zoned block device write operation case: do not plug the BIO */
return NULL;
return current->plug;
}
/* Free all requests on the list */


@ -10,16 +10,10 @@ static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
unsigned int cur = atomic_read(v);
for (;;) {
unsigned int old;
do {
if (cur >= below)
return false;
old = atomic_cmpxchg(v, cur, cur + 1);
if (old == cur)
break;
cur = old;
}
} while (!atomic_try_cmpxchg(v, &cur, cur + 1));
return true;
}
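atomic_try_cmpxchg() writes the observed value back through its second argument on failure, which is what lets the explicit old/cur reload of the removed loop disappear. A hedged companion sketch applying the same idiom to a bounded decrement (atomic_dec_above() is hypothetical, not part of this merge):

	static bool atomic_dec_above(atomic_t *v, unsigned int above)
	{
		unsigned int cur = atomic_read(v);

		do {
			if (cur <= above)
				return false;
		} while (!atomic_try_cmpxchg(v, &cur, cur - 1));

		return true;
	}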


@ -86,7 +86,7 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
init_waitqueue_head(&rq_wait->wait);
}
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
/*
* No IO can be in-flight when adding rqos, so freeze queue, which
@ -98,6 +98,8 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
blk_mq_freeze_queue(q);
spin_lock_irq(&q->queue_lock);
if (rq_qos_id(q, rqos->id))
goto ebusy;
rqos->next = q->rq_qos;
q->rq_qos = rqos;
spin_unlock_irq(&q->queue_lock);
@ -109,6 +111,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
blk_mq_debugfs_register_rqos(rqos);
mutex_unlock(&q->debugfs_mutex);
}
return 0;
ebusy:
spin_unlock_irq(&q->queue_lock);
blk_mq_unfreeze_queue(q);
return -EBUSY;
}
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)


@ -893,18 +893,19 @@ static bool disk_has_partitions(struct gendisk *disk)
}
/**
* blk_queue_set_zoned - configure a disk queue zoned model.
* disk_set_zoned - configure the zoned model for a disk
* @disk: the gendisk of the queue to configure
* @model: the zoned model to set
*
* Set the zoned model of the request queue of @disk according to @model.
* Set the zoned model of @disk to @model.
*
* When @model is BLK_ZONED_HM (host managed), this should be called only
* if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
* If @model specifies BLK_ZONED_HA (host aware), the effective model used
* depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
* on the disk.
*/
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
struct request_queue *q = disk->queue;
@ -945,10 +946,10 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
blk_queue_zone_write_granularity(q,
queue_logical_block_size(q));
} else {
blk_queue_clear_zone_settings(q);
disk_clear_zone_settings(disk);
}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
EXPORT_SYMBOL_GPL(disk_set_zoned);
int bdev_alignment_offset(struct block_device *bdev)
{


@ -274,6 +274,11 @@ static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page
return queue_var_show(q->limits.virt_boundary_mask, page);
}
static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_dma_alignment(q), page);
}
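A hedged userspace sketch of consuming the new attribute (the device path is illustrative and error handling is minimal): the file reports an alignment mask, so a buffer for raw O_DIRECT access needs its address aligned to mask + 1, independently of the file-offset alignment:

	#include <stdio.h>
	#include <stdlib.h>

	static void *alloc_dio_buffer(size_t len)
	{
		unsigned long mask = 511;	/* fallback if the file is absent */
		void *buf = NULL;
		FILE *f = fopen("/sys/block/sda/queue/dma_alignment", "r");

		if (f) {
			if (fscanf(f, "%lu", &mask) != 1)
				mask = 511;
			fclose(f);
		}
		/* the attribute is a mask, so the required boundary is mask + 1 */
		if (posix_memalign(&buf, mask + 1, len))
			return NULL;
		return buf;
	}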
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \
@ -320,17 +325,17 @@ static ssize_t queue_zoned_show(struct request_queue *q, char *page)
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_nr_zones(q), page);
return queue_var_show(disk_nr_zones(q->disk), page);
}
static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_max_open_zones(q), page);
return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}
static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_max_active_zones(q), page);
return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
@ -606,6 +611,7 @@ QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
@ -667,6 +673,7 @@ static struct attribute *queue_attrs[] = {
&blk_throtl_sample_time_entry.attr,
#endif
&queue_virt_boundary_mask_entry.attr,
&queue_dma_alignment_entry.attr,
NULL,
};
@ -748,11 +755,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
* decremented with blk_put_queue(). Once the refcount reaches 0 this function
* is called.
*
* For drivers that have a request_queue on a gendisk and added with
* __device_add_disk() the refcount to request_queue will reach 0 with
* the last put_disk() called by the driver. For drivers which don't use
* __device_add_disk() this happens with blk_cleanup_queue().
*
* Drivers exist which depend on the release of the request_queue to be
* synchronous, it should not be deferred.
*
@ -774,8 +776,6 @@ static void blk_release_queue(struct kobject *kobj)
blk_free_queue_stats(q->stats);
kfree(q->poll_stat);
blk_queue_free_zone_bitmaps(q);
if (queue_is_mq(q))
blk_mq_release(q);
@ -784,7 +784,7 @@ static void blk_release_queue(struct kobject *kobj)
if (blk_queue_has_srcu(q))
cleanup_srcu_struct(q->srcu);
ida_simple_remove(&blk_queue_ida, q->id);
ida_free(&blk_queue_ida, q->id);
call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
@ -793,7 +793,13 @@ static const struct sysfs_ops queue_sysfs_ops = {
.store = queue_attr_store,
};
static const struct attribute_group *blk_queue_attr_groups[] = {
&queue_attr_group,
NULL
};
struct kobj_type blk_queue_ktype = {
.default_groups = blk_queue_attr_groups,
.sysfs_ops = &queue_sysfs_ops,
.release = blk_release_queue,
};
@ -804,32 +810,17 @@ struct kobj_type blk_queue_ktype = {
*/
int blk_register_queue(struct gendisk *disk)
{
int ret;
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
ret = blk_trace_init_sysfs(dev);
if (ret)
return ret;
int ret;
mutex_lock(&q->sysfs_dir_lock);
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) {
blk_trace_remove_sysfs(dev);
ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue");
if (ret < 0)
goto unlock;
}
ret = sysfs_create_group(&q->kobj, &queue_attr_group);
if (ret) {
blk_trace_remove_sysfs(dev);
kobject_del(&q->kobj);
kobject_put(&dev->kobj);
goto unlock;
}
if (queue_is_mq(q))
__blk_mq_register_dev(dev, q);
blk_mq_sysfs_register(disk);
mutex_lock(&q->sysfs_lock);
mutex_lock(&q->debugfs_mutex);
@ -839,7 +830,7 @@ int blk_register_queue(struct gendisk *disk)
blk_mq_debugfs_register(q);
mutex_unlock(&q->debugfs_mutex);
ret = disk_register_independent_access_ranges(disk, NULL);
ret = disk_register_independent_access_ranges(disk);
if (ret)
goto put_dev;
@ -888,8 +879,6 @@ int blk_register_queue(struct gendisk *disk)
mutex_unlock(&q->sysfs_lock);
mutex_unlock(&q->sysfs_dir_lock);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
return ret;
}
@ -927,9 +916,8 @@ void blk_unregister_queue(struct gendisk *disk)
* structures that can be modified through sysfs.
*/
if (queue_is_mq(q))
blk_mq_unregister_dev(disk_to_dev(disk), q);
blk_mq_sysfs_unregister(disk);
blk_crypto_sysfs_unregister(q);
blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock);
elv_unregister_queue(q);
@ -948,6 +936,4 @@ void blk_unregister_queue(struct gendisk *disk)
q->sched_debugfs_dir = NULL;
q->rqos_debugfs_dir = NULL;
mutex_unlock(&q->debugfs_mutex);
kobject_put(&disk_to_dev(disk)->kobj);
}


@ -2203,8 +2203,9 @@ bool __blk_throtl_bio(struct bio *bio)
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
int op, unsigned long time)
enum req_op op, unsigned long time)
{
const bool rw = op_is_write(op);
struct latency_bucket *latency;
int index;
@ -2215,10 +2216,10 @@ static void throtl_track_latency(struct throtl_data *td, sector_t size,
index = request_bucket_index(size);
latency = get_cpu_ptr(td->latency_buckets[op]);
latency = get_cpu_ptr(td->latency_buckets[rw]);
latency[index].total_latency += time;
latency[index].samples++;
put_cpu_ptr(td->latency_buckets[op]);
put_cpu_ptr(td->latency_buckets[rw]);
}
void blk_throtl_stat_add(struct request *rq, u64 time_ns)


@ -451,7 +451,7 @@ static bool close_io(struct rq_wb *rwb)
#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
unsigned int limit;
@ -462,7 +462,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
if (!rwb_enabled(rwb))
return UINT_MAX;
if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
return rwb->wb_background;
/*
@ -473,9 +473,9 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
* the idle limit, or go to normal if we haven't had competing
* IO for a bit.
*/
if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
limit = rwb->rq_depth.max_depth;
else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
/*
* If less than 100ms since we completed unrelated IO,
* limit us to half the depth for background writeback.
@ -490,13 +490,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
struct wbt_wait_data {
struct rq_wb *rwb;
enum wbt_flags wb_acct;
unsigned long rw;
blk_opf_t opf;
};
static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
struct wbt_wait_data *data = private_data;
return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}
static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
@ -510,13 +510,13 @@ static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
unsigned long rw)
blk_opf_t opf)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
struct wbt_wait_data data = {
.rwb = rwb,
.wb_acct = wb_acct,
.rw = rw,
.opf = opf,
};
rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
@ -670,7 +670,7 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
static int wbt_data_dir(const struct request *rq)
{
const int op = req_op(rq);
const enum req_op op = req_op(rq);
if (op == REQ_OP_READ)
return READ;
@ -820,6 +820,7 @@ int wbt_init(struct request_queue *q)
{
struct rq_wb *rwb;
int i;
int ret;
rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
if (!rwb)
@ -846,7 +847,10 @@ int wbt_init(struct request_queue *q)
/*
* Assign rwb and add the stats callback.
*/
rq_qos_add(q, &rwb->rqos);
ret = rq_qos_add(q, &rwb->rqos);
if (ret)
goto err_free;
blk_stat_add_callback(q, rwb->cb);
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
@ -855,4 +859,10 @@ int wbt_init(struct request_queue *q)
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0;
err_free:
blk_stat_free_callback(rwb->cb);
kfree(rwb);
return ret;
}


@ -57,10 +57,10 @@ EXPORT_SYMBOL_GPL(blk_zone_cond_str);
*/
bool blk_req_needs_zone_write_lock(struct request *rq)
{
if (!rq->q->seq_zones_wlock)
if (blk_rq_is_passthrough(rq))
return false;
if (blk_rq_is_passthrough(rq))
if (!rq->q->disk->seq_zones_wlock)
return false;
switch (req_op(rq)) {
@ -77,7 +77,7 @@ bool blk_req_zone_write_trylock(struct request *rq)
{
unsigned int zno = blk_rq_zone_no(rq);
if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
return false;
WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);
void __blk_req_zone_write_lock(struct request *rq)
{
if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
rq->q->seq_zones_wlock)))
rq->q->disk->seq_zones_wlock)))
return;
WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
@ -101,28 +101,29 @@ EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
void __blk_req_zone_write_unlock(struct request *rq)
{
rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
if (rq->q->seq_zones_wlock)
if (rq->q->disk->seq_zones_wlock)
WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
rq->q->seq_zones_wlock));
rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
/**
* blkdev_nr_zones - Get number of zones
* @disk: Target gendisk
* bdev_nr_zones - Get number of zones
* @bdev: Target device
*
* Return the total number of zones of a zoned block device. For a block
* device without zone capabilities, the number of zones is always 0.
*/
unsigned int blkdev_nr_zones(struct gendisk *disk)
unsigned int bdev_nr_zones(struct block_device *bdev)
{
sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
sector_t zone_sectors = bdev_zone_sectors(bdev);
if (!blk_queue_is_zoned(disk->queue))
if (!bdev_is_zoned(bdev))
return 0;
return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
EXPORT_SYMBOL_GPL(bdev_nr_zones);
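A hedged caller-side sketch of the bdev-based helper (my_zone_cb and my_data are illustrative placeholders):

	unsigned int nr_zones = bdev_nr_zones(bdev);

	if (!nr_zones)
		return -EOPNOTSUPP;	/* zero means not a zoned device */
	ret = blkdev_report_zones(bdev, 0, nr_zones, my_zone_cb, my_data);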
/**
* blkdev_report_zones - Get zones information
@ -149,8 +150,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
struct gendisk *disk = bdev->bd_disk;
sector_t capacity = get_capacity(disk);
if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
WARN_ON_ONCE(!disk->fops->report_zones))
if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
return -EOPNOTSUPP;
if (!nr_zones || sector >= capacity)
@ -189,27 +189,26 @@ static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
gfp_t gfp_mask)
{
struct request_queue *q = bdev_get_queue(bdev);
sector_t capacity = get_capacity(bdev->bd_disk);
sector_t zone_sectors = blk_queue_zone_sectors(q);
struct gendisk *disk = bdev->bd_disk;
sector_t capacity = bdev_nr_sectors(bdev);
sector_t zone_sectors = bdev_zone_sectors(bdev);
unsigned long *need_reset;
struct bio *bio = NULL;
sector_t sector = 0;
int ret;
need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
if (!need_reset)
return -ENOMEM;
ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
q->nr_zones, blk_zone_need_reset_cb,
need_reset);
ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
blk_zone_need_reset_cb, need_reset);
if (ret < 0)
goto out_free_need_reset;
ret = 0;
while (sector < capacity) {
if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
sector += zone_sectors;
continue;
}
@ -257,18 +256,17 @@ static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
* The operation to execute on each zone can be a zone reset, open, close
* or finish request.
*/
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
sector_t sector, sector_t nr_sectors,
gfp_t gfp_mask)
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)
{
struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors = blk_queue_zone_sectors(q);
sector_t capacity = get_capacity(bdev->bd_disk);
sector_t zone_sectors = bdev_zone_sectors(bdev);
sector_t capacity = bdev_nr_sectors(bdev);
sector_t end_sector = sector + nr_sectors;
struct bio *bio = NULL;
int ret = 0;
if (!blk_queue_is_zoned(q))
if (!bdev_is_zoned(bdev))
return -EOPNOTSUPP;
if (bdev_read_only(bdev))
@ -350,7 +348,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!q)
return -ENXIO;
if (!blk_queue_is_zoned(q))
if (!bdev_is_zoned(bdev))
return -ENOTTY;
if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
@ -398,7 +396,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
void __user *argp = (void __user *)arg;
struct request_queue *q;
struct blk_zone_range zrange;
enum req_opf op;
enum req_op op;
int ret;
if (!argp)
@ -408,7 +406,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
if (!q)
return -ENXIO;
if (!blk_queue_is_zoned(q))
if (!bdev_is_zoned(bdev))
return -ENOTTY;
if (!(mode & FMODE_WRITE))
@ -450,12 +448,12 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
void blk_queue_free_zone_bitmaps(struct request_queue *q)
void disk_free_zone_bitmaps(struct gendisk *disk)
{
kfree(q->conv_zones_bitmap);
q->conv_zones_bitmap = NULL;
kfree(q->seq_zones_wlock);
q->seq_zones_wlock = NULL;
kfree(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
kfree(disk->seq_zones_wlock);
disk->seq_zones_wlock = NULL;
}
struct blk_revalidate_zone_args {
@ -605,15 +603,15 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
blk_mq_freeze_queue(q);
if (ret > 0) {
blk_queue_chunk_sectors(q, args.zone_sectors);
q->nr_zones = args.nr_zones;
swap(q->seq_zones_wlock, args.seq_zones_wlock);
swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
disk->nr_zones = args.nr_zones;
swap(disk->seq_zones_wlock, args.seq_zones_wlock);
swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
if (update_driver_data)
update_driver_data(disk);
ret = 0;
} else {
pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
blk_queue_free_zone_bitmaps(q);
disk_free_zone_bitmaps(disk);
}
blk_mq_unfreeze_queue(q);
@ -623,16 +621,18 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
void blk_queue_clear_zone_settings(struct request_queue *q)
void disk_clear_zone_settings(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
blk_mq_freeze_queue(q);
blk_queue_free_zone_bitmaps(q);
disk_free_zone_bitmaps(disk);
blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
q->nr_zones = 0;
q->max_open_zones = 0;
q->max_active_zones = 0;
disk->nr_zones = 0;
disk->max_open_zones = 0;
disk->max_active_zones = 0;
q->limits.chunk_sectors = 0;
q->limits.zone_write_granularity = 0;
q->limits.max_zone_append_sectors = 0;


@ -31,11 +31,6 @@ extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
static inline void __blk_get_queue(struct request_queue *q)
{
kobject_get(&q->kobj);
}
bool is_flush_rq(struct request *req);
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
@ -159,6 +154,19 @@ static inline bool blk_discard_mergable(struct request *req)
return false;
}
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
enum req_op op)
{
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
if (unlikely(op == REQ_OP_WRITE_ZEROES))
return q->limits.max_write_zeroes_sectors;
return q->limits.max_sectors;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
@ -392,11 +400,11 @@ static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif
int blk_alloc_ext_minor(void);
@ -411,6 +419,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
sector_t length);
void blk_drop_partitions(struct gendisk *disk);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
struct lock_class_key *lkclass);
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
@ -436,13 +447,14 @@ extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;
extern struct attribute_group blk_trace_attr_group;
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
extern const struct address_space_operations def_blk_aops;
int disk_register_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *new_iars);
int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
#ifdef CONFIG_FAIL_MAKE_REQUEST


@ -205,19 +205,26 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
int rw = bio_data_dir(*bio_orig);
struct bio_vec *to, from;
struct bvec_iter iter;
unsigned i = 0;
unsigned i = 0, bytes = 0;
bool bounce = false;
int sectors = 0;
int sectors;
bio_for_each_segment(from, *bio_orig, iter) {
if (i++ < BIO_MAX_VECS)
sectors += from.bv_len >> 9;
bytes += from.bv_len;
if (PageHighMem(from.bv_page))
bounce = true;
}
if (!bounce)
return;
/*
* Individual bvecs might not be logical block aligned. Round down
* the split size so that each bio is properly block size aligned,
* even if we do not use the full hardware limits.
*/
sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
SECTOR_SHIFT;
if (sectors < bio_sectors(*bio_orig)) {
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio_chain(bio, *bio_orig);


@ -324,14 +324,14 @@ void bsg_remove_queue(struct request_queue *q)
container_of(q->tag_set, struct bsg_set, tag_set);
bsg_unregister_queue(bset->bd);
blk_cleanup_queue(q);
blk_mq_destroy_queue(q);
blk_mq_free_tag_set(&bset->tag_set);
kfree(bset);
}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
static enum blk_eh_timer_return bsg_timeout(struct request *rq)
{
struct bsg_set *bset =
container_of(rq->q->tag_set, struct bsg_set, tag_set);
@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
return q;
out_cleanup_queue:
blk_cleanup_queue(q);
blk_mq_destroy_queue(q);
out_queue:
blk_mq_free_tag_set(set);
out_tag_set:


@ -34,7 +34,7 @@ struct elevator_mq_ops {
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
void (*prepare_request)(struct request *);
void (*finish_request)(struct request *);
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);


@ -32,14 +32,21 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
return 0;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
/* avoid the need for a I/O completion work item */
if (iocb->ki_flags & IOCB_DSYNC)
op |= REQ_FUA;
return op;
opf |= REQ_FUA;
return opf;
}
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
struct iov_iter *iter)
{
return pos & (bdev_logical_block_size(bdev) - 1) ||
!bdev_iter_is_aligned(bdev, iter);
}
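A worked example of the relaxed check, assuming for illustration a device with 4096-byte logical blocks whose driver reports a 511 dma_alignment mask: pos must still be a multiple of 4096 and each iovec length a multiple of 4096, but the buffer addresses themselves only need 512-byte alignment to pass bdev_iter_is_aligned().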
#define DIO_INLINE_BIO_VECS 4
@ -54,8 +61,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
struct bio bio;
ssize_t ret;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
if (blkdev_dio_unaligned(bdev, pos, iter))
return -EINVAL;
if (nr_pages <= DIO_INLINE_BIO_VECS)
@ -169,12 +175,11 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
struct blkdev_dio *dio;
struct bio *bio;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
loff_t pos = iocb->ki_pos;
int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
if (blkdev_dio_unaligned(bdev, pos, iter))
return -EINVAL;
if (iocb->ki_flags & IOCB_ALLOC_CACHE)
@ -292,14 +297,13 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
{
struct block_device *bdev = iocb->ki_filp->private_data;
bool is_read = iov_iter_rw(iter) == READ;
unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
struct blkdev_dio *dio;
struct bio *bio;
loff_t pos = iocb->ki_pos;
int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
if (blkdev_dio_unaligned(bdev, pos, iter))
return -EINVAL;
if (iocb->ki_flags & IOCB_ALLOC_CACHE)


@ -101,29 +101,6 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
}
EXPORT_SYMBOL_GPL(set_capacity_and_notify);
/*
* Format the device name of the indicated block device into the supplied buffer
* and return a pointer to that same buffer for convenience.
*
* Note: do not use this in new code, use the %pg specifier to sprintf and
* printk instead.
*/
const char *bdevname(struct block_device *bdev, char *buf)
{
struct gendisk *hd = bdev->bd_disk;
int partno = bdev->bd_partno;
if (!partno)
snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
else
snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
return buf;
}
EXPORT_SYMBOL(bdevname);
static void part_stat_read_all(struct block_device *part,
struct disk_stats *stat)
{
@ -617,6 +594,8 @@ void del_gendisk(struct gendisk *disk)
* Fail any new I/O.
*/
set_bit(GD_DEAD, &disk->state);
if (test_bit(GD_OWNS_QUEUE, &disk->state))
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
set_capacity(disk, 0);
/*
@ -663,11 +642,16 @@ void del_gendisk(struct gendisk *disk)
blk_mq_unquiesce_queue(q);
/*
* Allow using passthrough request again after the queue is torn down.
* If the disk does not own the queue, allow using passthrough requests
* again. Else leave the queue frozen to fail all I/O.
*/
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
__blk_mq_unfreeze_queue(q, true);
if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
__blk_mq_unfreeze_queue(q, true);
} else {
if (queue_is_mq(q))
blk_mq_exit_queue(q);
}
}
EXPORT_SYMBOL(del_gendisk);
@ -1127,6 +1111,9 @@ static struct attribute_group disk_attr_group = {
static const struct attribute_group *disk_attr_groups[] = {
&disk_attr_group,
#ifdef CONFIG_BLK_DEV_IO_TRACE
&blk_trace_attr_group,
#endif
NULL
};
@ -1151,10 +1138,23 @@ static void disk_release(struct device *dev)
might_sleep();
WARN_ON_ONCE(disk_live(disk));
/*
* To undo all the initialization from blk_mq_init_allocated_queue in
* case of a probe failure where add_disk is never called, we have to
* call blk_mq_exit_queue here. We can't do this for the more common
* teardown case (yet) as the tagset can be gone by the time the disk
* is released once it was added.
*/
if (queue_is_mq(disk->queue) &&
test_bit(GD_OWNS_QUEUE, &disk->state) &&
!test_bit(GD_ADDED, &disk->state))
blk_mq_exit_queue(disk->queue);
blkcg_exit_queue(disk->queue);
disk_release_events(disk);
kfree(disk->random);
disk_free_zone_bitmaps(disk);
xa_destroy(&disk->part_tbl);
disk->queue->disk = NULL;
@ -1338,9 +1338,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
{
struct gendisk *disk;
if (!blk_get_queue(q))
return NULL;
disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (!disk)
goto out_put_queue;
@ -1391,7 +1388,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
blk_put_queue(q);
return NULL;
}
EXPORT_SYMBOL(__alloc_disk_node);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
{
@ -1404,9 +1400,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
disk = __alloc_disk_node(q, node, lkclass);
if (!disk) {
blk_cleanup_queue(q);
blk_put_queue(q);
return NULL;
}
set_bit(GD_OWNS_QUEUE, &disk->state);
return disk;
}
EXPORT_SYMBOL(__blk_alloc_disk);
@ -1418,6 +1415,9 @@ EXPORT_SYMBOL(__blk_alloc_disk);
* This decrements the refcount for the struct gendisk. When this reaches 0
* we'll have disk_release() called.
*
* Note: for a blk-mq disk, put_disk() must be called before freeing the
* tag_set when handling probe errors (that is, before add_disk() is called).
*
* Context: Any context, but the last reference must not be dropped from
* atomic context.
*/
@ -1428,22 +1428,6 @@ void put_disk(struct gendisk *disk)
}
EXPORT_SYMBOL(put_disk);
/**
* blk_cleanup_disk - shutdown a gendisk allocated by blk_alloc_disk
* @disk: gendisk to shutdown
*
* Mark the queue hanging off @disk DYING, drain all pending requests, then mark
* the queue DEAD, destroy and put it and the gendisk structure.
*
* Context: can sleep
*/
void blk_cleanup_disk(struct gendisk *disk)
{
blk_cleanup_queue(disk->queue);
put_disk(disk);
}
EXPORT_SYMBOL(blk_cleanup_disk);
static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
char event[] = "DISK_RO=1";


@ -495,7 +495,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
case BLKGETZONESZ:
return put_uint(argp, bdev_zone_sectors(bdev));
case BLKGETNRZONES:
return put_uint(argp, blkdev_nr_zones(bdev->bd_disk));
return put_uint(argp, bdev_nr_zones(bdev));
case BLKROGET:
return put_int(argp, bdev_read_only(bdev) != 0);
case BLKSSZGET: /* get block device logical block size */


@ -138,6 +138,32 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
return ret;
}
/*
* If the task has set an I/O priority, use that. Otherwise, return
* the default I/O priority.
*
* Expected to be called for the current task or with task_lock() held to
* keep the io_context stable.
*/
int __get_task_ioprio(struct task_struct *p)
{
struct io_context *ioc = p->io_context;
int prio;
if (p != current)
lockdep_assert_held(&p->alloc_lock);
if (ioc)
prio = ioc->ioprio;
else
prio = IOPRIO_DEFAULT;
if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
task_nice_ioprio(p));
return prio;
}
EXPORT_SYMBOL_GPL(__get_task_ioprio);
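A worked example of the fallback path, based on task_nice_ioclass() and task_nice_ioprio() as defined upstream: a SCHED_NORMAL task at nice 0 that never called ioprio_set() resolves to IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4), since task_nice_ioprio() computes (nice + 20) / 5 and nice 0 therefore maps to level 4.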
static int get_task_ioprio(struct task_struct *p)
{
int ret;
@ -145,22 +171,38 @@ static int get_task_ioprio(struct task_struct *p)
ret = security_task_getioprio(p);
if (ret)
goto out;
ret = IOPRIO_DEFAULT;
task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
ret = __get_task_ioprio(p);
task_unlock(p);
out:
return ret;
}
int ioprio_best(unsigned short aprio, unsigned short bprio)
/*
* Return raw IO priority value as set by userspace. We use this for
* ioprio_get(pid, IOPRIO_WHO_PROCESS) so that we keep historical behavior and
* also so that userspace can distinguish unset IO priority (which just gets
* overridden based on the task's nice value) from IO priority set to some value.
*/
static int get_task_raw_ioprio(struct task_struct *p)
{
if (!ioprio_valid(aprio))
aprio = IOPRIO_DEFAULT;
if (!ioprio_valid(bprio))
bprio = IOPRIO_DEFAULT;
int ret;
ret = security_task_getioprio(p);
if (ret)
goto out;
task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
else
ret = IOPRIO_DEFAULT;
task_unlock(p);
out:
return ret;
}
static int ioprio_best(unsigned short aprio, unsigned short bprio)
{
return min(aprio, bprio);
}
@ -181,7 +223,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
else
p = find_task_by_vpid(who);
if (p)
ret = get_task_ioprio(p);
ret = get_task_raw_ioprio(p);
break;
case IOPRIO_WHO_PGRP:
if (!who)


@ -195,9 +195,9 @@ struct kyber_hctx_data {
static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
void *key);
static unsigned int kyber_sched_domain(unsigned int op)
static unsigned int kyber_sched_domain(blk_opf_t opf)
{
switch (op & REQ_OP_MASK) {
switch (opf & REQ_OP_MASK) {
case REQ_OP_READ:
return KYBER_READ;
case REQ_OP_WRITE:
@ -553,13 +553,13 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
}
}
static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
/*
* We use the scheduler tags as per-hardware queue queueing tokens.
* Async requests can be limited at this stage.
*/
if (!op_is_sync(op)) {
if (!op_is_sync(opf)) {
struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
data->shallow_depth = kqd->async_depth;


@ -543,12 +543,12 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
* Called by __blk_mq_alloc_request(). The shallow_depth value set by this
* function is used by __blk_mq_get_tag().
*/
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
struct deadline_data *dd = data->q->elevator->elevator_data;
/* Do not throttle synchronous reads. */
if (op_is_sync(op) && !op_is_write(op))
if (op_is_sync(opf) && !op_is_write(opf))
return;
/*


@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/vmalloc.h>
#include <linux/blktrace_api.h>
#include <linux/raid/detect.h>
#include "check.h"
@ -331,7 +330,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
case BLK_ZONED_HA:
pr_info("%s: disabling host aware zoned block device support due to partitions\n",
disk->disk_name);
blk_queue_set_zoned(disk, BLK_ZONED_NONE);
disk_set_zoned(disk, BLK_ZONED_NONE);
break;
case BLK_ZONED_NONE:
break;

View File

@ -408,6 +408,15 @@ config BLK_DEV_RBD
If unsure, say N.
config BLK_DEV_UBLK
tristate "Userspace block driver (Experimental)"
select IO_URING
help
io_uring based userspace block driver. Together with the ublk server, ublk
has been working well, but the interface with userspace and the command data
definition aren't finalized yet and might change according to future
requirements, so mark it as experimental for now.
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV


@ -39,4 +39,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
swim_mod-y := swim.o swim_asm.o


@ -1802,7 +1802,7 @@ static int fd_alloc_disk(int drive, int system)
unit[drive].gendisk[system] = disk;
err = add_disk(disk);
if (err)
blk_cleanup_disk(disk);
put_disk(disk);
return err;
}


@ -427,7 +427,7 @@ aoeblk_gdalloc(void *vp)
return;
out_disk_cleanup:
blk_cleanup_disk(gd);
put_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:


@ -277,7 +277,7 @@ freedev(struct aoedev *d)
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
blk_cleanup_disk(d->gd);
put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
}
t = d->targets;


@ -2031,7 +2031,7 @@ static void ataflop_probe(dev_t dev)
return;
cleanup_disk:
blk_cleanup_disk(unit[drive].disk[type]);
put_disk(unit[drive].disk[type]);
unit[drive].disk[type] = NULL;
}
@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
if (!unit[i].disk[type])
continue;
del_gendisk(unit[i].disk[type]);
blk_cleanup_queue(unit[i].disk[type]->queue);
put_disk(unit[i].disk[type]);
}
blk_mq_free_tag_set(&unit[i].tag_set);
@ -2064,7 +2063,7 @@ static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
continue;
if (fs->registered[type])
del_gendisk(fs->disk[type]);
blk_cleanup_disk(fs->disk[type]);
put_disk(fs->disk[type]);
}
blk_mq_free_tag_set(&fs->tag_set);
}


@ -256,7 +256,7 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
unsigned int len, unsigned int off, unsigned int op,
unsigned int len, unsigned int off, enum req_op op,
sector_t sector)
{
void *mem;
@ -310,7 +310,7 @@ static void brd_submit_bio(struct bio *bio)
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, unsigned int op)
struct page *page, enum req_op op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
@ -419,7 +419,7 @@ static int brd_alloc(int i)
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out_free_dev:
list_del(&brd->brd_list);
kfree(brd);
@ -439,7 +439,7 @@ static void brd_cleanup(void)
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
del_gendisk(brd->brd_disk);
blk_cleanup_disk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
list_del(&brd->brd_list);
kfree(brd);


@ -124,12 +124,13 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
sector_t sector, int op)
sector_t sector, enum req_op op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
int err, op_flags = 0;
int err;
blk_opf_t op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
@ -174,7 +175,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t sector, int op)
sector_t sector, enum req_op op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@ -385,7 +386,7 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
err = -EIO;
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {


@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;


@ -1495,7 +1495,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, int op);
struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
@ -1547,8 +1547,7 @@ extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
const unsigned, const int);
struct drbd_peer_request *, blk_opf_t, int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,

View File

@ -2207,7 +2207,7 @@ void drbd_destroy_device(struct kref *kref)
if (device->bitmap) /* should no longer be there. */
drbd_bm_cleanup(device);
__free_page(device->md_io.page);
blk_cleanup_disk(device->vdisk);
put_disk(device->vdisk);
kfree(device->rs_plan_s);
/* not for_each_connection(connection, resource):
@ -2807,7 +2807,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
out_no_bitmap:
__free_page(device->md_io.page);
out_no_io_page:
blk_cleanup_disk(disk);
put_disk(disk);
out_no_disk:
kref_put(&resource->kref, drbd_destroy_resource);
kfree(device);


@ -1621,8 +1621,7 @@ static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, stru
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
const unsigned op, const unsigned op_flags,
const int fault_type)
const blk_opf_t opf, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
@ -1668,8 +1667,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
GFP_NOIO);
bio = bio_alloc(device->ldev->backing_bdev, nr_pages, opf, GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_private = peer_req;
@ -2060,7 +2058,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE,
DRBD_FAULT_RS_WR) == 0)
return 0;
@ -2383,14 +2381,14 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio_flags(u32 dpf)
static blk_opf_t wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}
static unsigned long wire_flags_to_bio_op(u32 dpf)
static enum req_op wire_flags_to_bio_op(u32 dpf)
{
if (dpf & DP_ZEROES)
return REQ_OP_WRITE_ZEROES;
@ -2543,7 +2541,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
int op, op_flags;
enum req_op op;
blk_opf_t op_flags;
u32 dp_flags;
int err, tp;
@ -2681,7 +2680,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
err = drbd_submit_peer_request(device, peer_req, op, op_flags,
err = drbd_submit_peer_request(device, peer_req, op | op_flags,
DRBD_FAULT_DT_WR);
if (!err)
return 0;
@ -2979,7 +2978,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
fault_type) == 0)
return 0;
@ -4951,7 +4950,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
const int op = REQ_OP_WRITE_ZEROES;
const enum req_op op = REQ_OP_WRITE_ZEROES;
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
size, 0, GFP_NOIO);
@ -4969,7 +4968,8 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
err = drbd_submit_peer_request(device, peer_req, op,
DRBD_FAULT_RS_WR);
if (err) {
spin_lock_irq(&device->resource->req_lock);

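The drbd hunks above all apply one pattern from this series: the separate (op, op_flags) argument pair collapses into a single strongly typed blk_opf_t, and callers OR the flags into the operation before submitting. A minimal sketch of the idea, assuming the types and constants from include/linux/blk_types.h; the helper names here are made up for illustration:

#include <linux/blk_types.h>

/* Hypothetical helper: build the combined value the way
 * wire_flags_to_bio_op()/wire_flags_to_bio_flags() now feed
 * drbd_submit_peer_request(). */
static blk_opf_t example_write_opf(bool sync, bool fua, bool flush)
{
	return REQ_OP_WRITE |
	       (sync ? REQ_SYNC : 0) |
	       (fua ? REQ_FUA : 0) |
	       (flush ? REQ_PREFLUSH : 0);
}

/* The low bits still identify the operation, so the typed op can be
 * recovered from the combined value (this is what req_op() does for
 * a struct request). */
static enum req_op example_op(blk_opf_t opf)
{
	return (__force enum req_op)(opf & REQ_OP_MASK);
}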

@ -523,16 +523,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
char b[BDEVNAME_SIZE];
if (!__ratelimit(&drbd_ratelimit_state))
return;
drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
drbd_warn(device, "local %s IO error sector %llu+%u on %pg\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
bdevname(device->ldev->backing_bdev, b));
device->ldev->backing_bdev);
}
/* Helper for HANDED_OVER_TO_NETWORK.

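This hunk is part of the bdevname() removal: the %pg vsprintf specifier prints a block_device's name directly, so the on-stack BDEVNAME_SIZE buffer disappears. A minimal sketch of the before/after, with an illustrative function name:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Before: char b[BDEVNAME_SIZE]; bdevname(bdev, b); pr_warn("%s", b);
 * After: let the printk core format the device name itself. */
static void example_report_error(struct block_device *bdev, sector_t sector)
{
	pr_warn("I/O error on %pg, sector %llu\n",
		bdev, (unsigned long long)sector);
}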

@ -405,7 +405,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
DRBD_FAULT_RS_RD) == 0)
return 0;


@ -2859,7 +2859,7 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
(__force unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
if (test_and_set_bit(0, &fdc_busy)) {
@ -4557,7 +4557,7 @@ static void floppy_probe(dev_t dev)
return;
cleanup_disk:
blk_cleanup_disk(disks[drive][type]);
put_disk(disks[drive][type]);
disks[drive][type] = NULL;
mutex_unlock(&floppy_probe_lock);
}
@ -4753,7 +4753,7 @@ static int __init do_floppy_init(void)
if (!disks[drive][0])
break;
del_timer_sync(&motor_off_timer[drive]);
blk_cleanup_disk(disks[drive][0]);
put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
return err;
@ -4985,7 +4985,7 @@ static void __exit floppy_module_exit(void)
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
blk_cleanup_disk(disks[drive][i]);
put_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
}


@ -2040,7 +2040,7 @@ static int loop_add(int i)
return i;
out_cleanup_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo)
{
/* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_disk->queue);
blk_mq_free_tag_set(&lo->tag_set);
mutex_lock(&loop_ctl_mutex);

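loop, floppy and most of the drivers below receive the same conversion: with queue teardown moved into the gendisk release path, the old blk_cleanup_disk()/blk_cleanup_queue() two-step becomes a plain put_disk(). A hedged sketch of the resulting remove path for a blk-mq driver; "mydrv" and its fields are hypothetical, but the sequence mirrors loop_remove() after this series:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct mydrv {				/* hypothetical driver state */
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
};

static void mydrv_remove(struct mydrv *drv)
{
	del_gendisk(drv->disk);		/* unhook from userspace */
	put_disk(drv->disk);		/* final ref; queue freed on release */
	blk_mq_free_tag_set(&drv->tag_set);
}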

@ -94,17 +94,12 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
static LIST_HEAD(online_list);
static LIST_HEAD(removing_list);
static DEFINE_SPINLOCK(dev_lock);
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@ -146,11 +141,8 @@ static bool mtip_check_surprise_removal(struct driver_data *dd)
pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
if (dd->queue)
blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
else
dev_warn(&dd->pdev->dev,
"%s: dd->queue is NULL\n", __func__);
if (dd->disk)
blk_mark_disk_dead(dd->disk);
return true; /* device removed */
}
@ -2170,106 +2162,6 @@ static const struct attribute_group *mtip_disk_attr_groups[] = {
NULL,
};
/* debugsfs entries */
static ssize_t show_device_status(struct device_driver *drv, char *buf)
{
int size = 0;
struct driver_data *dd, *tmp;
unsigned long flags;
char id_buf[42];
u16 status = 0;
spin_lock_irqsave(&dev_lock, flags);
size += sprintf(&buf[size], "Devices Present:\n");
list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
if (dd->pdev) {
if (dd->port &&
dd->port->identify &&
dd->port->identify_valid) {
strlcpy(id_buf,
(char *) (dd->port->identify + 10), 21);
status = *(dd->port->identify + 141);
} else {
memset(id_buf, 0, 42);
status = 0;
}
if (dd->port &&
test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
size += sprintf(&buf[size],
" device %s %s (ftl rebuild %d %%)\n",
dev_name(&dd->pdev->dev),
id_buf,
status);
} else {
size += sprintf(&buf[size],
" device %s %s\n",
dev_name(&dd->pdev->dev),
id_buf);
}
}
}
size += sprintf(&buf[size], "Devices Being Removed:\n");
list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
if (dd->pdev) {
if (dd->port &&
dd->port->identify &&
dd->port->identify_valid) {
strlcpy(id_buf,
(char *) (dd->port->identify+10), 21);
status = *(dd->port->identify + 141);
} else {
memset(id_buf, 0, 42);
status = 0;
}
if (dd->port &&
test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
size += sprintf(&buf[size],
" device %s %s (ftl rebuild %d %%)\n",
dev_name(&dd->pdev->dev),
id_buf,
status);
} else {
size += sprintf(&buf[size],
" device %s %s\n",
dev_name(&dd->pdev->dev),
id_buf);
}
}
}
spin_unlock_irqrestore(&dev_lock, flags);
return size;
}
static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
int size = *offset;
char *buf;
int rv = 0;
if (!len || *offset)
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
size += show_device_status(NULL, buf);
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
rv = -EFAULT;
kfree(buf);
return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@ -2363,13 +2255,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return rv ? rv : *offset;
}
static const struct file_operations mtip_device_status_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_device_status,
.llseek = no_llseek,
};
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@ -2556,7 +2441,7 @@ static void mtip_softirq_done_fn(struct request *rq)
blk_mq_end_request(rq, cmd->status);
}
static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
static bool mtip_abort_cmd(struct request *req, void *data)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
@ -2569,7 +2454,7 @@ static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
return true;
}
static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
static bool mtip_queue_cmd(struct request *req, void *data)
{
struct driver_data *dd = data;
@ -3297,26 +3182,12 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
static int mtip_block_open(struct block_device *dev, fmode_t mode)
static void mtip_block_free_disk(struct gendisk *disk)
{
struct driver_data *dd;
struct driver_data *dd = disk->private_data;
if (dev && dev->bd_disk) {
dd = (struct driver_data *) dev->bd_disk->private_data;
if (dd) {
if (test_bit(MTIP_DDF_REMOVAL_BIT,
&dd->dd_flag)) {
return -ENODEV;
}
return 0;
}
}
return -ENODEV;
}
static void mtip_block_release(struct gendisk *disk, fmode_t mode)
{
ida_free(&rssd_index_ida, dd->index);
kfree(dd);
}
/*
@ -3326,13 +3197,12 @@ static void mtip_block_release(struct gendisk *disk, fmode_t mode)
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
.open = mtip_block_open,
.release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
#endif
.getgeo = mtip_block_getgeo,
.free_disk = mtip_block_free_disk,
.owner = THIS_MODULE
};
@ -3487,12 +3357,11 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
return 0;
}
static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
bool reserved)
static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
{
struct driver_data *dd = req->q->queuedata;
if (reserved) {
if (blk_mq_is_reserved_rq(req)) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_TIMEOUT;
@ -3664,7 +3533,7 @@ static int mtip_block_initialize(struct driver_data *dd)
disk_index_error:
ida_free(&rssd_index_ida, index);
ida_get_error:
blk_cleanup_disk(dd->disk);
put_disk(dd->disk);
block_queue_alloc_init_error:
blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
@ -3673,72 +3542,6 @@ static int mtip_block_initialize(struct driver_data *dd)
return rv;
}
static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(rq);
return true;
}
/*
* Block layer deinitialization function.
*
* Called by the PCI layer as each P320 device is removed.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_block_remove(struct driver_data *dd)
{
mtip_hw_debugfs_exit(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
if (!dd->sr) {
/*
* Explicitly wait here for IOs to quiesce,
* as mtip_standby_drive usually won't wait for IOs.
*/
if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
mtip_standby_drive(dd);
}
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
blk_freeze_queue_start(dd->queue);
blk_mq_quiesce_queue(dd->queue);
blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
blk_mq_unquiesce_queue(dd->queue);
if (dd->disk) {
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
}
put_disk(dd->disk);
}
dd->disk = NULL;
ida_free(&rssd_index_ida, dd->index);
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
return 0;
}
/*
* Function called by the PCI layer when just before the
* machine shuts down.
@ -3755,23 +3558,14 @@ static int mtip_block_shutdown(struct driver_data *dd)
{
mtip_hw_shutdown(dd);
/* Delete our gendisk structure, and cleanup the blk queue. */
if (dd->disk) {
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
}
put_disk(dd->disk);
dd->disk = NULL;
dd->queue = NULL;
}
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
ida_free(&rssd_index_ida, dd->index);
blk_mq_free_tag_set(&dd->tags);
put_disk(dd->disk);
return 0;
}
@ -3905,7 +3699,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@ -3952,9 +3745,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
INIT_LIST_HEAD(&dd->online_list);
INIT_LIST_HEAD(&dd->remove_list);
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@ -4047,11 +3837,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
else
rv = 0; /* device in rebuild state, return 0 from probe */
/* Add to online list even if in ftl rebuild */
spin_lock_irqsave(&dev_lock, flags);
list_add(&dd->online_list, &online_list);
spin_unlock_irqrestore(&dev_lock, flags);
goto done;
block_initialize_err:
@ -4085,14 +3870,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags);
unsigned long to;
mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
@ -4109,11 +3887,35 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
mtip_block_remove(dd);
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
mtip_hw_debugfs_exit(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
if (!dd->sr) {
/*
* Explicitly wait here for IOs to quiesce,
* as mtip_standby_drive usually won't wait for IOs.
*/
if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
mtip_standby_drive(dd);
}
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
blk_mq_free_tag_set(&dd->tags);
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
if (dd->isr_workq) {
destroy_workqueue(dd->isr_workq);
@ -4124,14 +3926,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->remove_list);
spin_unlock_irqrestore(&dev_lock, flags);
kfree(dd);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
}
/*
@ -4250,15 +4048,6 @@ static int __init mtip_init(void)
pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL;
}
if (dfs_parent) {
dfs_device_status = debugfs_create_file("device_status",
0444, dfs_parent, NULL,
&mtip_device_status_fops);
if (IS_ERR_OR_NULL(dfs_device_status)) {
pr_err("Error creating device_status node\n");
dfs_device_status = NULL;
}
}
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);

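Two independent API changes meet in the mtip32xx conversion above: the ->timeout() callback loses its "bool reserved" argument, and drivers that still care ask the block layer directly via the new blk_mq_is_reserved_rq(). A sketch under those assumptions, with a hypothetical handler name and illustrative policy:

#include <linux/blk-mq.h>

static enum blk_eh_timer_return mydrv_timeout_rq(struct request *rq)
{
	if (blk_mq_is_reserved_rq(rq)) {
		/* was: if (reserved) - an internal command timed out,
		 * complete it rather than retrying */
		return BLK_EH_DONE;
	}
	return BLK_EH_RESET_TIMER;
}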

@ -149,7 +149,6 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
@ -462,10 +461,6 @@ struct driver_data {
int isr_binding;
struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */
int unal_qdepth; /* qdepth of unaligned IO queue */
};


@ -157,7 +157,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out:
return err;
}


@ -250,7 +250,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
struct gendisk *disk = nbd->disk;
del_gendisk(disk);
blk_cleanup_disk(disk);
put_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
/*
@ -393,8 +393,7 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
bool reserved)
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
@ -880,7 +879,7 @@ static void recv_work(struct work_struct *work)
kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
static bool nbd_clear_req(struct request *req, void *data)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@ -1833,7 +1832,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
out_free_work:
destroy_workqueue(nbd->recv_workq);
out_err_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);

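nbd's nbd_clear_req() shows the matching change for tag iteration: blk_mq_tagset_busy_iter() callbacks drop the trailing "bool reserved" parameter as well. A sketch with illustrative names; real drivers would record an error status in the per-request pdu before completing:

#include <linux/blk-mq.h>

static bool example_cancel_rq(struct request *rq, void *data)
{
	/* a driver-specific pdu would be marked failed here */
	blk_mq_complete_request(rq);
	return true;			/* keep iterating */
}

static void example_cancel_all(struct blk_mq_tag_set *set, void *drv)
{
	blk_mq_tagset_busy_iter(set, example_cancel_rq, drv);
}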

@ -1310,7 +1310,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
enum req_opf op,
enum req_op op,
sector_t sector,
sector_t nr_sectors)
{
@ -1381,9 +1381,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
}
}
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
unsigned int nr_sectors)
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
blk_status_t ret;
@ -1401,7 +1400,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_opf op)
sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
@ -1578,7 +1577,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return nr;
}
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
@ -1737,7 +1736,7 @@ static void null_del_dev(struct nullb *nullb)
null_restart_queue_async(nullb);
}
blk_cleanup_disk(nullb->disk);
put_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@ -2082,7 +2081,7 @@ static int null_add_dev(struct nullb_device *dev)
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
blk_cleanup_disk(nullb->disk);
put_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);

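null_blk also picks up the rename of enum req_opf to enum req_op: the enum types the operation alone, while blk_opf_t carries operation plus flags. A small illustration of dispatching on the typed op; the function is hypothetical:

#include <linux/blk_types.h>

static blk_status_t example_dispatch(enum req_op op)
{
	switch (op) {		/* typed enum, so the compiler can warn
				 * about unhandled operations */
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_DISCARD:
		return BLK_STS_OK;
	default:
		return BLK_STS_NOTSUPP;
	}
}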

@ -136,9 +136,8 @@ struct nullb {
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
unsigned int nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
@ -146,9 +145,8 @@ int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
@ -164,7 +162,7 @@ static inline int null_register_zoned_dev(struct nullb *nullb)
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector, sector_t nr_sectors)
enum req_op op, sector_t sector, sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}


@ -36,7 +36,7 @@ TRACE_EVENT(nullb_zone_op,
TP_ARGS(cmd, zone_no, zone_cond),
TP_STRUCT__entry(
__array(char, disk, DISK_NAME_LEN)
__field(enum req_opf, op)
__field(enum req_op, op)
__field(unsigned int, zone_no)
__field(unsigned int, zone_cond)
),


@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
@ -170,12 +170,12 @@ int null_register_zoned_dev(struct nullb *nullb)
return ret;
} else {
blk_queue_chunk_sectors(q, dev->zone_size_sects);
q->nr_zones = blkdev_nr_zones(nullb->disk);
nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
blk_queue_max_open_zones(q, dev->zone_max_open);
blk_queue_max_active_zones(q, dev->zone_max_active);
disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
return 0;
}
@ -600,7 +600,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
return BLK_STS_OK;
}
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
@ -653,7 +653,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev;

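The zoned-device cleanup moves configuration from request_queue helpers onto the gendisk. A sketch of registering a host-managed zoned disk with the new helpers, mirroring the null_blk hunk above; the function and its parameters are illustrative:

#include <linux/blkdev.h>

static void example_setup_zoned(struct gendisk *disk, sector_t zone_sects,
				unsigned int max_open, unsigned int max_active)
{
	disk_set_zoned(disk, BLK_ZONED_HM);
	blk_queue_chunk_sectors(disk->queue, zone_sects);
	disk_set_max_open_zones(disk, max_open);
	disk_set_max_active_zones(disk, max_active);
	disk->nr_zones = bdev_nr_zones(disk->part0);
}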

@ -956,7 +956,7 @@ static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
out_pi_release:
pi_release(cd->pi);
out_free_disk:
blk_cleanup_disk(cd->disk);
put_disk(cd->disk);
out_free_tag_set:
blk_mq_free_tag_set(&cd->tag_set);
return ret;
@ -1029,7 +1029,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
del_gendisk(cd->disk);
pi_release(cd->pi);
blk_cleanup_disk(cd->disk);
put_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
}


@ -501,6 +501,8 @@ static enum action do_pd_io_start(void)
return do_pd_read_start();
else
return do_pd_write_start();
default:
break;
}
return Fail;
}
@ -943,7 +945,7 @@ static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
goto cleanup_disk;
return 0;
cleanup_disk:
blk_cleanup_disk(disk->gd);
put_disk(disk->gd);
put_disk:
put_disk(p);
disk->gd = NULL;
@ -1018,7 +1020,7 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
blk_cleanup_disk(p);
put_disk(p);
blk_mq_free_tag_set(&disk->tag_set);
pi_release(disk->pi);
}


@ -975,7 +975,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
out_pi_release:
pi_release(pf->pi);
out_free_disk:
blk_cleanup_disk(pf->disk);
put_disk(pf->disk);
out_free_tag_set:
blk_mq_free_tag_set(&pf->tag_set);
return ret;
@ -1044,7 +1044,7 @@ static void __exit pf_exit(void)
if (!pf->present)
continue;
del_gendisk(pf->disk);
blk_cleanup_disk(pf->disk);
put_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
pi_release(pf->pi);
}


@ -2460,11 +2460,9 @@ static int pkt_seq_show(struct seq_file *m, void *p)
{
struct pktcdvd_device *pd = m->private;
char *msg;
char bdev_buf[BDEVNAME_SIZE];
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
bdevname(pd->bdev, bdev_buf));
seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@ -2521,7 +2519,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
struct scsi_device *sdev;
@ -2534,8 +2531,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
pkt_err(pd, "%s already setup\n",
bdevname(pd2->bdev, b));
pkt_err(pd, "%pg already setup\n", pd2->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@ -2570,7 +2566,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
return 0;
out_mem:
@ -2733,7 +2729,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
return 0;
out_mem2:
blk_cleanup_disk(disk);
put_disk(disk);
out_mem:
mempool_exit(&pd->rb_pool);
kfree(pd);
@ -2783,7 +2779,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
blk_cleanup_disk(pd->disk);
put_disk(pd->disk);
mempool_exit(&pd->rb_pool);
kfree(pd);


@ -473,7 +473,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
return 0;
fail_cleanup_disk:
blk_cleanup_disk(gendisk);
put_disk(gendisk);
fail_free_tag_set:
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
@ -500,7 +500,7 @@ static void ps3disk_remove(struct ps3_system_bus_device *_dev)
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
blk_cleanup_disk(priv->gendisk);
put_disk(priv->gendisk);
blk_mq_free_tag_set(&priv->tag_set);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);


@ -761,7 +761,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
return 0;
out_cleanup_disk:
blk_cleanup_disk(gendisk);
put_disk(gendisk);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
@ -792,7 +792,7 @@ static void ps3vram_remove(struct ps3_system_bus_device *dev)
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
blk_cleanup_disk(priv->gendisk);
put_disk(priv->gendisk);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);


@ -4729,7 +4729,7 @@ static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
blk_cleanup_disk(rbd_dev->disk);
put_disk(rbd_dev->disk);
blk_mq_free_tag_set(&rbd_dev->tag_set);
rbd_dev->disk = NULL;
}


@ -1408,7 +1408,7 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
blk_cleanup_disk(dev->gd);
put_disk(dev->gd);
return err;
}
@ -1630,7 +1630,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
del_gendisk(dev->gd);
blk_cleanup_disk(dev->gd);
put_disk(dev->gd);
}
static void destroy_sysfs(struct rnbd_clt_dev *dev,
@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
* Here unmap happens in parallel for only one reason:
* blk_cleanup_queue() takes around half a second, so
* del_gendisk() takes around half a second, so
* on huge amount of devices the whole module unload
* procedure takes minutes.
*/


@ -229,9 +229,9 @@ static inline bool rnbd_flags_supported(u32 flags)
return true;
}
static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
u32 bio_opf;
blk_opf_t bio_opf;
switch (rnbd_op(rnbd_opf)) {
case RNBD_OP_READ:
@ -286,7 +286,8 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
break;
default:
WARN(1, "Unknown request type %d (flags %llu)\n",
req_op(rq), (unsigned long long)rq->cmd_flags);
(__force u32)req_op(rq),
(__force unsigned long long)rq->cmd_flags);
rnbd_opf = 0;
}


@ -28,7 +28,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
goto err;
dev->blk_open_flags = flags;
bdevname(dev->bdev, dev->name);
return dev;


@ -15,7 +15,6 @@
struct rnbd_dev {
struct block_device *bdev;
fmode_t blk_open_flags;
char name[BDEVNAME_SIZE];
};
/**


@ -38,14 +38,13 @@ static struct kobj_type dev_ktype = {
};
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
struct block_device *bdev,
const char *dev_name)
struct block_device *bdev)
{
struct kobject *bdev_kobj;
int ret;
ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
rnbd_devs_kobj, dev_name);
rnbd_devs_kobj, "%pg", bdev);
if (ret) {
kobject_put(&dev->dev_kobj);
return ret;


@ -419,7 +419,7 @@ static struct rnbd_srv_sess_dev
return sess_dev;
}
static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev)
{
struct rnbd_srv_dev *dev;
@ -427,7 +427,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
if (!dev)
return ERR_PTR(-ENOMEM);
strscpy(dev->id, id, sizeof(dev->id));
snprintf(dev->id, sizeof(dev->id), "%pg", bdev);
kref_init(&dev->kref);
INIT_LIST_HEAD(&dev->sess_dev_list);
mutex_init(&dev->lock);
@ -512,7 +512,7 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
int ret;
struct rnbd_srv_dev *new_dev, *dev;
new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev);
if (IS_ERR(new_dev))
return new_dev;
@ -758,8 +758,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
rnbd_dev->name);
ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,


@ -68,8 +68,7 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
/* rnbd-srv-sysfs.c */
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
struct block_device *bdev,
const char *dir_name);
struct block_device *bdev);
void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);


@ -886,7 +886,7 @@ static int probe_disk(struct vdc_port *port)
return 0;
out_cleanup_disk:
blk_cleanup_disk(g);
put_disk(g);
out_free_tag:
blk_mq_free_tag_set(&port->tag_set);
return err;
@ -1070,7 +1070,7 @@ static void vdc_port_remove(struct vio_dev *vdev)
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
blk_cleanup_disk(port->disk);
put_disk(port->disk);
blk_mq_free_tag_set(&port->tag_set);
vdc_free_tx_ring(port);


@ -783,7 +783,7 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
if (fs->registered)
del_gendisk(fs->disk);
blk_cleanup_disk(disk);
put_disk(disk);
blk_mq_free_tag_set(&fs->tag_set);
}


@ -1238,7 +1238,7 @@ static int swim3_attach(struct macio_dev *mdev,
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
put_disk(disk);
out_free_tag_set:
blk_mq_free_tag_set(&fs->tag_set);
out_unregister:


@ -1377,7 +1377,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
if (host->state > HST_DEV_ACTIVATE)
del_gendisk(disk);
blk_cleanup_disk(disk);
put_disk(disk);
}
static int carm_init_shm(struct carm_host *host)
@ -1536,7 +1536,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
clear_bit(0, &carm_major_alloc);
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
blk_mq_destroy_queue(host->oob_q);
blk_mq_free_tag_set(&host->tag_set);
err_out_dma_free:
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
clear_bit(0, &carm_major_alloc);
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
blk_mq_destroy_queue(host->oob_q);
blk_mq_free_tag_set(&host->tag_set);
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
iounmap(host->mmio);
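sx8 is the odd case in this batch: its out-of-band queue was never tied to a gendisk, so instead of put_disk() the teardown uses the new blk_mq_destroy_queue(). A hedged sketch of the pairing, with an illustrative function name:

#include <linux/blk-mq.h>
#include <linux/err.h>

static void example_oob_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	q = blk_mq_init_queue(set);	/* standalone, no gendisk attached */
	if (IS_ERR(q))
		return;

	/* ... issue internal commands on q ... */

	blk_mq_destroy_queue(q);	/* replaces blk_cleanup_queue() */
	blk_mq_free_tag_set(set);
}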

drivers/block/ublk_drv.c: new file, 1545 lines (diff suppressed because it is too large)


@ -1089,7 +1089,7 @@ static int virtblk_probe(struct virtio_device *vdev)
return 0;
out_cleanup_disk:
blk_cleanup_disk(vblk->disk);
put_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@ -1111,7 +1111,6 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
blk_cleanup_queue(vblk->disk->queue);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);


@ -442,7 +442,7 @@ static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
* Routines for managing virtual block devices (vbds).
*/
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
int operation)
enum req_op operation)
{
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
@ -1193,8 +1193,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct bio *bio = NULL;
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
int operation;
int operation_flags = 0;
enum req_op operation;
blk_opf_t operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;


@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
if (err) {
blk_cleanup_disk(info->gd);
put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
goto fail;
@ -2482,7 +2482,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
blkif_free(info, 0);
if (info->gd) {
xlbd_release_minors(info->gd->first_minor, info->gd->minors);
blk_cleanup_disk(info->gd);
put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
}


@ -337,7 +337,7 @@ static int z2ram_register_disk(int minor)
z2ram_gendisk[minor] = disk;
err = add_disk(disk);
if (err)
blk_cleanup_disk(disk);
put_disk(disk);
return err;
}
@ -384,7 +384,6 @@ static void __exit z2_exit(void)
for (i = 0; i < Z2MINOR_COUNT; i++) {
del_gendisk(z2ram_gendisk[i]);
blk_cleanup_queue(z2ram_gendisk[i]->queue);
put_disk(z2ram_gendisk[i]);
}
blk_mq_free_tag_set(&tag_set);


@ -1523,7 +1523,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, unsigned int op, struct bio *bio)
int offset, enum req_op op, struct bio *bio)
{
int ret;
@ -1631,7 +1631,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, unsigned int op)
struct page *page, enum req_op op)
{
int offset, ret;
u32 index;
@ -1957,7 +1957,7 @@ static int zram_add(void)
return device_id;
out_cleanup_disk:
blk_cleanup_disk(zram->disk);
put_disk(zram->disk);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
@ -2008,7 +2008,7 @@ static int zram_remove(struct zram *zram)
*/
zram_reset_device(zram);
blk_cleanup_disk(zram->disk);
put_disk(zram->disk);
kfree(zram);
return 0;
}

Some files were not shown because too many files have changed in this diff.