nvme: split nvme_uninit_ctrl into stop and uninit
Usually, before we tear down the controller we want to:

1. complete/cancel any inflight ctrl works
2. remove the ctrl namespaces (only for removal though; resets shouldn't remove any namespaces)

But we do not want to destroy the controller device yet, as we might still use it for logging during the teardown stage.

This patch adds nvme_start_ctrl(), which queues the inflight controller works (AEN, namespace scan, queue start, and keep-alive if kato is set), and nvme_stop_ctrl(), which cancels those works; namespace removal is left to the callers to handle. Move nvme_uninit_ctrl() until after we are done with the controller device.

Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
This commit is contained in:
parent
9b3e990584
commit
d09f2b45f3
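In short, the patch turns the single nvme_uninit_ctrl() teardown call into a stop / remove-namespaces / uninit sequence, and moves keep-alive start/stop out of the individual transports. A minimal sketch of the removal ordering the diff below establishes, with transport_shutdown() standing in as a hypothetical placeholder for the transport-specific teardown (nvme_dev_disable(), nvme_rdma_shutdown_ctrl(), and so on):

static void example_remove_ctrl(struct nvme_ctrl *ctrl)
{
        nvme_stop_ctrl(ctrl);           /* stop keep-alive, flush AEN and scan works */
        nvme_remove_namespaces(ctrl);   /* removal path only; resets keep namespaces */
        transport_shutdown(ctrl);       /* hypothetical transport-specific teardown */
        nvme_uninit_ctrl(ctrl);         /* destroy the device node last: it may still
                                         * be used for logging during teardown */
        nvme_put_ctrl(ctrl);            /* drop the final reference */
}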
drivers/nvme/host/core.c
@@ -2591,12 +2591,29 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
 	spin_unlock(&dev_list_lock);
 }
 
-void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
+void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
+	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
 	flush_work(&ctrl->scan_work);
-	nvme_remove_namespaces(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
+
+void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->kato)
+		nvme_start_keep_alive(ctrl);
+
+	if (ctrl->queue_count > 1) {
+		nvme_queue_scan(ctrl);
+		nvme_queue_async_events(ctrl);
+		nvme_start_queues(ctrl);
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
+{
 	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 
 	spin_lock(&dev_list_lock);
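The caller-side effect of the two new helpers, condensed from the transport hunks that follow: the open-coded start sequence collapses into a single call, with the queue_count and kato checks now living inside nvme_start_ctrl() itself.

        /* before (open-coded in each transport) */
        if (ctrl->ctrl.queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }

        /* after */
        nvme_start_ctrl(&ctrl->ctrl);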
drivers/nvme/host/fc.c
@@ -2232,7 +2232,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
 out_cleanup_blk_queue:
-	nvme_stop_keep_alive(&ctrl->ctrl);
 	blk_cleanup_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
 	blk_mq_free_tag_set(&ctrl->tag_set);
@@ -2366,8 +2365,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		goto out_disconnect_admin_queue;
 	}
 
-	nvme_start_keep_alive(&ctrl->ctrl);
-
 	/* FC-NVME supports normal SGL Data Block Descriptors */
 
 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -2401,17 +2398,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
 	ctrl->ctrl.nr_reconnects = 0;
 
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_start_queues(&ctrl->ctrl);
-		nvme_queue_scan(&ctrl->ctrl);
-		nvme_queue_async_events(&ctrl->ctrl);
-	}
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	return 0;	/* Success */
 
 out_term_aen_ops:
 	nvme_fc_term_aen_ops(ctrl);
-	nvme_stop_keep_alive(&ctrl->ctrl);
 out_disconnect_admin_queue:
 	/* send a Disconnect(association) LS to fc-nvme target */
 	nvme_fc_xmt_disconnect_assoc(ctrl);
@@ -2434,8 +2426,6 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 {
 	unsigned long flags;
 
-	nvme_stop_keep_alive(&ctrl->ctrl);
-
 	spin_lock_irqsave(&ctrl->lock, flags);
 	ctrl->flags |= FCCTRL_TERMIO;
 	ctrl->iocnt = 0;
@@ -2517,7 +2507,8 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 
 	cancel_work_sync(&ctrl->ctrl.reset_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
-
+	nvme_stop_ctrl(&ctrl->ctrl);
+	nvme_remove_namespaces(&ctrl->ctrl);
 	/*
 	 * kill the association on the link side. this will block
 	 * waiting for io to terminate
@@ -2612,6 +2603,7 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
 	int ret;
 
+	nvme_stop_ctrl(&ctrl->ctrl);
 	/* will block will waiting for io to terminate */
 	nvme_fc_delete_association(ctrl);
 
drivers/nvme/host/nvme.h
@@ -280,6 +280,8 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		const struct nvme_ctrl_ops *ops, unsigned long quirks);
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_start_ctrl(struct nvme_ctrl *ctrl);
+void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 void nvme_put_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_identify(struct nvme_ctrl *ctrl);
 
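With these declarations in place, a transport reset handler brackets its own teardown and re-setup with the new pair, as the rdma and loop reset works below now do. A sketch under the assumption of hypothetical transport_shutdown()/transport_setup() helpers (error handling elided):

static void example_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, reset_work);

        nvme_stop_ctrl(ctrl);           /* cancel works; namespaces survive a reset */
        transport_shutdown(ctrl);       /* hypothetical transport teardown */

        if (transport_setup(ctrl))      /* hypothetical re-setup */
                return;

        WARN_ON_ONCE(!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE));
        nvme_start_ctrl(ctrl);          /* keep-alive (if kato), scan, AENs, queue start */
}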
drivers/nvme/host/pci.c
@@ -2134,15 +2134,6 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	/*
-	 * A controller that can not execute IO typically requires user
-	 * intervention to correct. For such degraded controllers, the driver
-	 * should not submit commands the user did not request, so skip
-	 * registering for asynchronous event notification on this condition.
-	 */
-	if (dev->online_queues > 1)
-		nvme_queue_async_events(&dev->ctrl);
-
 	/*
 	 * Keep the controller around but remove all namespaces if we don't have
 	 * any working I/O queue.
@@ -2163,8 +2154,7 @@ static void nvme_reset_work(struct work_struct *work)
 		goto out;
 	}
 
-	if (dev->online_queues > 1)
-		nvme_queue_scan(&dev->ctrl);
+	nvme_start_ctrl(&dev->ctrl);
 	return;
 
 out:
@@ -2341,11 +2331,13 @@ static void nvme_remove(struct pci_dev *pdev)
 	}
 
 	flush_work(&dev->ctrl.reset_work);
-	nvme_uninit_ctrl(&dev->ctrl);
+	nvme_stop_ctrl(&dev->ctrl);
+	nvme_remove_namespaces(&dev->ctrl);
 	nvme_dev_disable(dev, true);
 	nvme_free_host_mem(dev);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
+	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_put_ctrl(&dev->ctrl);
drivers/nvme/host/rdma.c
@@ -732,8 +732,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto requeue;
 
-	nvme_start_keep_alive(&ctrl->ctrl);
-
 	if (ctrl->ctrl.queue_count > 1) {
 		ret = nvme_rdma_init_io_queues(ctrl);
 		if (ret)
@@ -751,10 +749,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	WARN_ON_ONCE(!changed);
 	ctrl->ctrl.nr_reconnects = 0;
 
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_queue_scan(&ctrl->ctrl);
-		nvme_queue_async_events(&ctrl->ctrl);
-	}
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
@@ -772,7 +767,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 			struct nvme_rdma_ctrl, err_work);
 	int i;
 
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	nvme_stop_ctrl(&ctrl->ctrl);
 
 	for (i = 0; i < ctrl->ctrl.queue_count; i++)
 		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
@@ -1603,8 +1598,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
-	nvme_start_keep_alive(&ctrl->ctrl);
-
 	return 0;
 
 out_cleanup_queue:
@@ -1622,7 +1615,6 @@ out_free_queue:
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
 	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
@@ -1645,10 +1637,12 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 
 static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	nvme_uninit_ctrl(&ctrl->ctrl);
+	nvme_stop_ctrl(&ctrl->ctrl);
+	nvme_remove_namespaces(&ctrl->ctrl);
 	if (shutdown)
 		nvme_rdma_shutdown_ctrl(ctrl);
 
+	nvme_uninit_ctrl(&ctrl->ctrl);
 	if (ctrl->ctrl.tagset) {
 		blk_cleanup_queue(ctrl->ctrl.connect_q);
 		blk_mq_free_tag_set(&ctrl->tag_set);
@@ -1710,6 +1704,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	int ret;
 	bool changed;
 
+	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl);
 
 	ret = nvme_rdma_configure_admin_queue(ctrl);
@@ -1739,11 +1734,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_start_queues(&ctrl->ctrl);
-		nvme_queue_scan(&ctrl->ctrl);
-		nvme_queue_async_events(&ctrl->ctrl);
-	}
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	return;
 
@@ -1931,15 +1922,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_queue_scan(&ctrl->ctrl);
-		nvme_queue_async_events(&ctrl->ctrl);
-	}
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	return &ctrl->ctrl;
 
 out_remove_admin_queue:
-	nvme_stop_keep_alive(&ctrl->ctrl);
 	nvme_rdma_destroy_admin_queue(ctrl);
 out_kfree_queues:
 	kfree(ctrl->queues);
drivers/nvme/target/loop.c
@@ -407,8 +407,6 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
-	nvme_start_keep_alive(&ctrl->ctrl);
-
 	return 0;
 
 out_cleanup_queue:
@@ -422,8 +420,6 @@ out_free_sq:
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
-
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
@@ -446,8 +442,10 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
 	struct nvme_loop_ctrl *ctrl = container_of(work,
 				struct nvme_loop_ctrl, delete_work);
 
-	nvme_uninit_ctrl(&ctrl->ctrl);
+	nvme_stop_ctrl(&ctrl->ctrl);
+	nvme_remove_namespaces(&ctrl->ctrl);
 	nvme_loop_shutdown_ctrl(ctrl);
+	nvme_uninit_ctrl(&ctrl->ctrl);
 	nvme_put_ctrl(&ctrl->ctrl);
 }
 
@@ -495,6 +493,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	bool changed;
 	int ret;
 
+	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_loop_shutdown_ctrl(ctrl);
 
 	ret = nvme_loop_configure_admin_queue(ctrl);
@@ -515,10 +514,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	nvme_queue_scan(&ctrl->ctrl);
-	nvme_queue_async_events(&ctrl->ctrl);
-
-	nvme_start_queues(&ctrl->ctrl);
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	return;
 
@@ -653,10 +649,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
-	if (opts->nr_io_queues) {
-		nvme_queue_scan(&ctrl->ctrl);
-		nvme_queue_async_events(&ctrl->ctrl);
-	}
+	nvme_start_ctrl(&ctrl->ctrl);
 
 	return &ctrl->ctrl;
 