mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-16 21:35:07 +00:00
block: simplify disk shutdown
Set the queue dying flag and call blk_mq_exit_queue from del_gendisk for
all disks that do not have separately allocated queues, and thus remove
the need to call blk_cleanup_queue for them.

Rename blk_cleanup_queue to blk_mq_destroy_queue to make it clear that
this function is intended only for separately allocated blk-mq queues.

This saves an extra queue freeze for devices without a separately
allocated queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20220619060552.1850436-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0e3534022f
commit 6f8191fdf4
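The driver-visible upshot, before the diff: a gendisk allocated with
blk_mq_alloc_disk() now owns its request queue (GD_OWNS_QUEUE), so
del_gendisk() marks the queue dying and exits blk-mq, and a plain
put_disk() releases it; only a separately allocated queue from
blk_mq_init_queue() still needs an explicit shutdown call, now named
blk_mq_destroy_queue(). A minimal sketch of the two teardown patterns
after this patch (the mydev structure and function names are
hypothetical; only the block-layer calls come from the patch):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct mydev {				/* hypothetical driver state */
	struct gendisk *disk;		/* from blk_mq_alloc_disk() */
	struct request_queue *queue;	/* from blk_mq_init_queue() */
	struct blk_mq_tag_set tag_set;
};

/* Disk that owns its queue: del_gendisk() now sets QUEUE_FLAG_DYING and
 * calls blk_mq_exit_queue(), so no extra queue shutdown call is needed. */
static void mydev_remove_disk(struct mydev *dev)
{
	del_gendisk(dev->disk);
	blk_mq_free_tag_set(&dev->tag_set);
	put_disk(dev->disk);	/* last put also drops the queue reference */
}

/* Separately allocated queue: use the renamed helper. */
static void mydev_remove_queue(struct mydev *dev)
{
	blk_mq_destroy_queue(dev->queue);	/* was blk_cleanup_queue() */
	blk_mq_free_tag_set(&dev->tag_set);
}

This mirrors the conversions below: loop_remove() and virtblk_remove()
simply drop their blk_cleanup_queue() call, while bsg_remove_queue() and
the NVMe fabrics drivers switch to blk_mq_destroy_queue().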
@@ -284,43 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/**
- * blk_cleanup_queue - shutdown a request queue
- * @q: request queue to shutdown
- *
- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
- * put it. All future requests will be failed immediately with -ENODEV.
- *
- * Context: can sleep
- */
-void blk_cleanup_queue(struct request_queue *q)
-{
-	/* cannot be called from atomic context */
-	might_sleep();
-
-	WARN_ON_ONCE(blk_queue_registered(q));
-
-	/* mark @q DYING, no new request or merges will be allowed afterwards */
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-	blk_queue_start_drain(q);
-
-	/*
-	 * Drain all requests queued before DYING marking. Set DEAD flag to
-	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
-	 * after draining finished.
-	 */
-	blk_freeze_queue(q);
-	blk_sync_queue(q);
-	if (queue_is_mq(q)) {
-		blk_mq_cancel_work_sync(q);
-		blk_mq_exit_queue(q);
-	}
-
-	/* @q is and will stay empty, shutdown and put */
-	blk_put_queue(q);
-}
-EXPORT_SYMBOL(blk_cleanup_queue);
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -3902,7 +3902,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 	q->queuedata = queuedata;
 	ret = blk_mq_init_allocated_queue(set, q);
 	if (ret) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return ERR_PTR(ret);
 	}
 	return q;
@@ -3914,6 +3914,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
+/**
+ * blk_mq_destroy_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * This shuts down a request queue allocated by blk_mq_init_queue() and drops
+ * the initial reference. All future requests will failed with -ENODEV.
+ *
+ * Context: can sleep
+ */
+void blk_mq_destroy_queue(struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_mq(q));
+	WARN_ON_ONCE(blk_queue_registered(q));
+
+	might_sleep();
+
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+	blk_freeze_queue(q);
+
+	blk_sync_queue(q);
+	blk_mq_cancel_work_sync(q);
+	blk_mq_exit_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
+	blk_put_queue(q);
+}
+EXPORT_SYMBOL(blk_mq_destroy_queue);
+
 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 		struct lock_class_key *lkclass)
 {
@@ -3926,13 +3955,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 
 	disk = __alloc_disk_node(q, set->numa_node, lkclass);
 	if (!disk) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return ERR_PTR(-ENOMEM);
 	}
+	set_bit(GD_OWNS_QUEUE, &disk->state);
 	return disk;
 }
 EXPORT_SYMBOL(__blk_mq_alloc_disk);
 
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+		struct lock_class_key *lkclass)
+{
+	if (!blk_get_queue(q))
+		return NULL;
+	return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+}
+EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
+
 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		struct blk_mq_tag_set *set, struct request_queue *q,
 		int hctx_idx, int node)
@@ -755,11 +755,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
  * decremented with blk_put_queue(). Once the refcount reaches 0 this function
  * is called.
  *
- * For drivers that have a request_queue on a gendisk and added with
- * __device_add_disk() the refcount to request_queue will reach 0 with
- * the last put_disk() called by the driver. For drivers which don't use
- * __device_add_disk() this happens with blk_cleanup_queue().
- *
  * Drivers exist which depend on the release of the request_queue to be
  * synchronous, it should not be deferred.
  *
@@ -424,6 +424,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
 		sector_t length);
 void blk_drop_partitions(struct gendisk *disk);
 
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+		struct lock_class_key *lkclass);
+
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);
@@ -324,7 +324,7 @@ void bsg_remove_queue(struct request_queue *q)
 			container_of(q->tag_set, struct bsg_set, tag_set);
 
 	bsg_unregister_queue(bset->bd);
-	blk_cleanup_queue(q);
+	blk_mq_destroy_queue(q);
 	blk_mq_free_tag_set(&bset->tag_set);
 	kfree(bset);
 }
@@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 
 	return q;
 out_cleanup_queue:
-	blk_cleanup_queue(q);
+	blk_mq_destroy_queue(q);
 out_queue:
 	blk_mq_free_tag_set(set);
 out_tag_set:
@@ -617,6 +617,8 @@ void del_gendisk(struct gendisk *disk)
 	 * Fail any new I/O.
 	 */
 	set_bit(GD_DEAD, &disk->state);
+	if (test_bit(GD_OWNS_QUEUE, &disk->state))
+		blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 	set_capacity(disk, 0);
 
 	/*
@@ -663,11 +665,16 @@ void del_gendisk(struct gendisk *disk)
 	blk_mq_unquiesce_queue(q);
 
 	/*
-	 * Allow using passthrough request again after the queue is torn down.
+	 * If the disk does not own the queue, allow using passthrough requests
+	 * again. Else leave the queue frozen to fail all I/O.
 	 */
-	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
-	__blk_mq_unfreeze_queue(q, true);
-
+	if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
+		blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+		__blk_mq_unfreeze_queue(q, true);
+	} else {
+		if (queue_is_mq(q))
+			blk_mq_exit_queue(q);
+	}
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -1338,9 +1345,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 {
 	struct gendisk *disk;
 
-	if (!blk_get_queue(q))
-		return NULL;
-
 	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
 	if (!disk)
 		goto out_put_queue;
@@ -1391,7 +1395,6 @@ out_put_queue:
 	blk_put_queue(q);
 	return NULL;
 }
-EXPORT_SYMBOL(__alloc_disk_node);
 
 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
@@ -1404,9 +1407,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 
 	disk = __alloc_disk_node(q, node, lkclass);
 	if (!disk) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return NULL;
 	}
+	set_bit(GD_OWNS_QUEUE, &disk->state);
 	return disk;
 }
 EXPORT_SYMBOL(__blk_alloc_disk);
@@ -1439,7 +1443,6 @@ EXPORT_SYMBOL(put_disk);
  */
 void blk_cleanup_disk(struct gendisk *disk)
 {
-	blk_cleanup_queue(disk->queue);
 	put_disk(disk);
 }
 EXPORT_SYMBOL(blk_cleanup_disk);
@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
 		if (!unit[i].disk[type])
 			continue;
 		del_gendisk(unit[i].disk[type]);
-		blk_cleanup_queue(unit[i].disk[type]->queue);
 		put_disk(unit[i].disk[type]);
 	}
 	blk_mq_free_tag_set(&unit[i].tag_set);
@@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo)
 {
 	/* Make this loop device unreachable from pathname. */
 	del_gendisk(lo->lo_disk);
-	blk_cleanup_queue(lo->lo_disk->queue);
 	blk_mq_free_tag_set(&lo->tag_set);
 
 	mutex_lock(&loop_ctl_mutex);
@@ -3565,7 +3565,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
 	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
 		del_gendisk(dd->disk);
 
-	blk_cleanup_queue(dd->queue);
 	blk_mq_free_tag_set(&dd->tags);
 	put_disk(dd->disk);
 	return 0;
@@ -3914,7 +3913,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
 			dd->disk->disk_name);
 
-	blk_cleanup_queue(dd->queue);
 	blk_mq_free_tag_set(&dd->tags);
 
 	/* De-initialize the protocol layer. */
@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
 		list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
 			/*
 			 * Here unmap happens in parallel for only one reason:
-			 * blk_cleanup_queue() takes around half a second, so
+			 * del_gendisk() takes around half a second, so
 			 * on huge amount of devices the whole module unload
 			 * procedure takes minutes.
 			 */
@@ -1536,7 +1536,7 @@ err_out_free_majors:
 		clear_bit(0, &carm_major_alloc);
 	else if (host->major == 161)
 		clear_bit(1, &carm_major_alloc);
-	blk_cleanup_queue(host->oob_q);
+	blk_mq_destroy_queue(host->oob_q);
 	blk_mq_free_tag_set(&host->tag_set);
 err_out_dma_free:
 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
 		clear_bit(0, &carm_major_alloc);
 	else if (host->major == 161)
 		clear_bit(1, &carm_major_alloc);
-	blk_cleanup_queue(host->oob_q);
+	blk_mq_destroy_queue(host->oob_q);
 	blk_mq_free_tag_set(&host->tag_set);
 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
 	iounmap(host->mmio);
@@ -1111,7 +1111,6 @@ static void virtblk_remove(struct virtio_device *vdev)
 	flush_work(&vblk->config_work);
 
 	del_gendisk(vblk->disk);
-	blk_cleanup_queue(vblk->disk->queue);
 	blk_mq_free_tag_set(&vblk->tag_set);
 
 	mutex_lock(&vblk->vdev_mutex);
@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
 
 	for (i = 0; i < Z2MINOR_COUNT; i++) {
 		del_gendisk(z2ram_gendisk[i]);
-		blk_cleanup_queue(z2ram_gendisk[i]->queue);
 		put_disk(z2ram_gendisk[i]);
 	}
 	blk_mq_free_tag_set(&tag_set);
@@ -831,7 +831,6 @@ probe_fail_no_mem:
 
 static int remove_gdrom(struct platform_device *devptr)
 {
-	blk_cleanup_queue(gd.gdrom_rq);
 	blk_mq_free_tag_set(&gd.tag_set);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
@@ -2187,7 +2187,6 @@ static void msb_remove(struct memstick_dev *card)
 
 	/* Remove the disk */
 	del_gendisk(msb->disk);
-	blk_cleanup_queue(msb->queue);
 	blk_mq_free_tag_set(&msb->tag_set);
 	msb->queue = NULL;
 
@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
 	del_gendisk(msb->disk);
 	dev_dbg(&card->dev, "mspro block remove\n");
 
-	blk_cleanup_queue(msb->queue);
 	blk_mq_free_tag_set(&msb->tag_set);
 	msb->queue = NULL;
 
@@ -2509,7 +2509,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	return md;
 
 err_cleanup_queue:
-	blk_cleanup_queue(md->disk->queue);
 	blk_mq_free_tag_set(&md->queue.tag_set);
 err_kfree:
 	kfree(md);
@@ -494,7 +494,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	if (blk_queue_quiesced(q))
 		blk_mq_unquiesce_queue(q);
 
-	blk_cleanup_queue(q);
 	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*
@@ -1502,7 +1502,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 
 	if (!blk_get_queue(anv->ctrl.admin_q)) {
 		nvme_start_admin_queue(&anv->ctrl);
-		blk_cleanup_queue(anv->ctrl.admin_q);
+		blk_mq_destroy_queue(anv->ctrl.admin_q);
 		anv->ctrl.admin_q = NULL;
 		ret = -ENODEV;
 		goto put_dev;
@@ -4103,7 +4103,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (!nvme_ns_head_multipath(ns->head))
 		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
 	del_gendisk(ns->disk);
-	blk_cleanup_queue(ns->queue);
 
 	down_write(&ns->ctrl->namespaces_rwsem);
 	list_del_init(&ns->list);
@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	unsigned long flags;
 
 	if (ctrl->ctrl.tagset) {
-		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 		blk_mq_free_tag_set(&ctrl->tag_set);
 	}
 
@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
 	nvme_start_admin_queue(&ctrl->ctrl);
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
-	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
 
 	kfree(ctrl->queues);
@@ -2953,7 +2953,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
 out_cleanup_blk_queue:
-	blk_cleanup_queue(ctrl->ctrl.connect_q);
+	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 	nvme_fc_free_io_queues(ctrl);
@@ -3642,9 +3642,9 @@ fail_ctrl:
 	return ERR_PTR(-EIO);
 
 out_cleanup_admin_q:
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
+	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
-	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_admin_tag_set:
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_queues:
@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
 		 * queue to flush these to completion.
 		 */
 		nvme_start_admin_queue(&dev->ctrl);
-		blk_cleanup_queue(dev->ctrl.admin_q);
+		blk_mq_destroy_queue(dev->ctrl.admin_q);
 		blk_mq_free_tag_set(&dev->admin_tagset);
 	}
 }
@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
 	if (remove) {
-		blk_cleanup_queue(ctrl->ctrl.admin_q);
-		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 	}
 	if (ctrl->async_event_sqe.data) {
@@ -935,10 +935,10 @@ out_stop_queue:
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
 out_cleanup_queue:
 	if (new)
-		blk_cleanup_queue(ctrl->ctrl.admin_q);
+		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
 	if (new)
-		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
 	if (remove) {
-		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 		blk_mq_free_tag_set(ctrl->ctrl.tagset);
 	}
 	nvme_rdma_free_io_queues(ctrl);
@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out:
 out_cleanup_connect_q:
 	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
-		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.tagset);
@@ -1885,7 +1885,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_io_queues(ctrl);
 	if (remove) {
-		blk_cleanup_queue(ctrl->connect_q);
+		blk_mq_destroy_queue(ctrl->connect_q);
 		blk_mq_free_tag_set(ctrl->tagset);
 	}
 	nvme_tcp_free_io_queues(ctrl);
@@ -1940,7 +1940,7 @@ out_wait_freeze_timed_out:
 out_cleanup_connect_q:
 	nvme_cancel_tagset(ctrl);
 	if (new)
-		blk_cleanup_queue(ctrl->connect_q);
+		blk_mq_destroy_queue(ctrl->connect_q);
 out_free_tag_set:
 	if (new)
 		blk_mq_free_tag_set(ctrl->tagset);
@@ -1953,8 +1953,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_queue(ctrl, 0);
 	if (remove) {
-		blk_cleanup_queue(ctrl->admin_q);
-		blk_cleanup_queue(ctrl->fabrics_q);
+		blk_mq_destroy_queue(ctrl->admin_q);
+		blk_mq_destroy_queue(ctrl->fabrics_q);
 		blk_mq_free_tag_set(ctrl->admin_tagset);
 	}
 	nvme_tcp_free_admin_queue(ctrl);
@@ -2012,10 +2012,10 @@ out_stop_queue:
 	nvme_cancel_admin_tagset(ctrl);
 out_cleanup_queue:
 	if (new)
-		blk_cleanup_queue(ctrl->admin_q);
+		blk_mq_destroy_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
 	if (new)
-		blk_cleanup_queue(ctrl->fabrics_q);
+		blk_mq_destroy_queue(ctrl->fabrics_q);
 out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
 		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
-	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
 }
 
@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
 	if (nctrl->tagset) {
-		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 		blk_mq_free_tag_set(&ctrl->tag_set);
 	}
 	kfree(ctrl->queues);
@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 out_cleanup_queue:
 	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
+	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
-	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_sq:
@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	return 0;
 
 out_cleanup_connect_q:
-	blk_cleanup_queue(ctrl->ctrl.connect_q);
+	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
 static void dasd_free_queue(struct dasd_block *block)
 {
 	if (block->request_queue) {
-		blk_cleanup_queue(block->request_queue);
+		blk_mq_destroy_queue(block->request_queue);
 		blk_mq_free_tag_set(&block->tag_set);
 		block->request_queue = NULL;
 	}
@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 	if (base->devindex >= DASD_PER_MAJOR)
 		return -EBUSY;
 
-	gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
-				&dasd_bio_compl_lkclass);
+	gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
+					  &dasd_bio_compl_lkclass);
 	if (!gdp)
 		return -ENOMEM;
 
@@ -163,7 +163,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 	 * Requeue this command. It will go before all other commands
 	 * that are already in the queue. Schedule requeue work under
 	 * lock such that the kblockd_schedule_work() call happens
-	 * before blk_cleanup_queue() finishes.
+	 * before blk_mq_destroy_queue() finishes.
 	 */
 	cmd->result = 0;
 
@@ -424,9 +424,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
 		 * it and the queue. Mitigate by taking a reference to the
 		 * queue and never touching the sdev again after we drop the
 		 * host lock. Note: if __scsi_remove_device() invokes
-		 * blk_cleanup_queue() before the queue is run from this
+		 * blk_mq_destroy_queue() before the queue is run from this
 		 * function then blk_run_queue() will return immediately since
-		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+		 * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
 		 */
 		slq = sdev->request_queue;
 		if (!blk_get_queue(slq))
@@ -1475,7 +1475,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
 		scsi_device_set_state(sdev, SDEV_DEL);
 	mutex_unlock(&sdev->state_mutex);
 
-	blk_cleanup_queue(sdev->request_queue);
+	blk_mq_destroy_queue(sdev->request_queue);
 	cancel_work_sync(&sdev->requeue_work);
 
 	if (sdev->host->hostt->slave_destroy)
@@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev)
 	if (!sdkp)
 		goto out;
 
-	gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
-			       &sd_bio_compl_lkclass);
+	gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
+					 &sd_bio_compl_lkclass);
 	if (!gd)
 		goto out_free;
 
@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev)
 	if (!cd)
 		goto fail;
 
-	disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
-				 &sr_bio_compl_lkclass);
+	disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
+					   &sr_bio_compl_lkclass);
 	if (!disk)
 		goto fail_free;
 	mutex_init(&cd->lock);
@@ -9487,7 +9487,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	ufs_bsg_remove(hba);
 	ufshpb_remove(hba);
 	ufs_sysfs_remove_nodes(hba->dev);
-	blk_cleanup_queue(hba->tmf_queue);
+	blk_mq_destroy_queue(hba->tmf_queue);
 	blk_mq_free_tag_set(&hba->tmf_tag_set);
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
@@ -9783,7 +9783,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	return 0;
 
 free_tmf_queue:
-	blk_cleanup_queue(hba->tmf_queue);
+	blk_mq_destroy_queue(hba->tmf_queue);
free_tmf_tag_set:
 	blk_mq_free_tag_set(&hba->tmf_tag_set);
 out_remove_scsi_host:
@@ -686,10 +686,13 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 									\
 	__blk_mq_alloc_disk(set, queuedata, &__key);			\
 })
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+		struct lock_class_key *lkclass);
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		struct request_queue *q);
 void blk_mq_unregister_dev(struct device *, struct request_queue *);
+void blk_mq_destroy_queue(struct request_queue *);
+
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
@@ -148,6 +148,7 @@ struct gendisk {
 #define GD_NATIVE_CAPACITY	3
 #define GD_ADDED		4
 #define GD_SUPPRESS_PART_SCAN	5
+#define GD_OWNS_QUEUE		6
 
 	struct mutex open_mutex;	/* open/close mutex */
 	unsigned open_partitions;	/* number of open partitions */
@@ -815,8 +816,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
 
 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
 
-struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
-		struct lock_class_key *lkclass);
 void put_disk(struct gendisk *disk);
 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
 
@@ -933,7 +932,6 @@ static inline unsigned int blk_chunk_sectors_left(sector_t offset,
 /*
  * Access functions for manipulating queue properties
  */
-extern void blk_cleanup_queue(struct request_queue *);
 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);