From 8be007c8e0911d0450b402ca8cbb1a8cbd00e8f2 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Sun, 22 Sep 2024 07:18:00 -0700 Subject: [PATCH 01/19] block: fix blk_rq_map_integrity_sg kernel-doc Fix the documentation to match the new function signature. Fixes: 76c313f658d2752 ("blk-integrity: improved sg segment mapping") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240922141800.3622319-1-kbusch@meta.com Signed-off-by: Jens Axboe --- block/blk-integrity.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 0a2b1c5d0ebf..83b696ba0cac 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -56,8 +56,7 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) /** * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist - * @q: request queue - * @bio: bio with integrity metadata attached + * @rq: request to map * @sglist: target scatterlist * * Description: Map the integrity vectors in request into a
From e38dad438fc08162e20c600ae899e9e60688f72e Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Sat, 21 Sep 2024 12:35:33 +0530 Subject: [PATCH 02/19] nvmet-passthru: clear EUID/NGUID/UUID while using loop target When nvme passthru is configured using the loop target, the clear_ids attribute is set to true by default. This attribute ensures that the EUID/NGUID/UUID are cleared for the loop passthru target. Newer NVMe disks supporting NVMe spec 1.3 or higher typically implement support for the "Namespace Identification Descriptor list" command. This command, when issued from the host, returns the EUID/NGUID/UUID assigned to the inquired namespace. Not clearing these values while using nvme passthru with the loop target would result in the NVMe host driver rejecting the namespace. This check was implemented in commit 2079f41ec6ff ("nvme: check that EUI/GUID/UUID are globally unique"). The fix implemented in this commit ensures that when the host issues the ns-id descriptor list command, the EUID/NGUID/UUID are cleared by the passthru target. In fact, the function nvmet_passthru_override_id_descs(), which clears those unique IDs (a sketch of its effect follows below), already exists, so we just need to ensure that the ns-id descriptor list command falls through the correct code path. And while we're at it, we also combine the three passthru admin command cases that share the same code.
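For reference, a minimal sketch of what the clearing does to the returned 4KB descriptor list. This is an illustrative reconstruction, not the verbatim upstream function: it assumes the NVME_NIDT_* constants and struct nvme_ns_id_desc from include/linux/nvme.h, and omits the SGL copy in/out and error handling.

	static void clear_ns_id_descs_sketch(void *data)
	{
		struct nvme_ns_id_desc *cur;
		bool csi_seen = false;
		int pos, len;
		u8 csi;

		/* Find the CSI descriptor, if any, before wiping the list. */
		for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
			cur = data + pos;
			if (cur->nidl == 0)
				break;
			if (cur->nidt == NVME_NIDT_CSI) {
				memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
				csi_seen = true;
				break;
			}
			len = sizeof(*cur) + cur->nidl;
		}

		/*
		 * Zero every identifier (EUI64/NGUID/UUID included), then
		 * re-emit only the command set identifier so the host never
		 * sees a duplicate unique ID.
		 */
		memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
		if (csi_seen) {
			cur = data;
			cur->nidt = NVME_NIDT_CSI;
			cur->nidl = NVME_NIDT_CSI_LEN;
			memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
		}
	}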
Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch --- drivers/nvme/target/passthru.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 24d0e2418d2e..0f9b280c438d 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -535,10 +535,6 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) break; case nvme_admin_identify: switch (req->cmd->identify.cns) { - case NVME_ID_CNS_CTRL: - req->execute = nvmet_passthru_execute_cmd; - req->p.use_workqueue = true; - return NVME_SC_SUCCESS; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_ZNS: @@ -547,7 +543,9 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) return NVME_SC_SUCCESS; } return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + case NVME_ID_CNS_CTRL: case NVME_ID_CNS_NS: + case NVME_ID_CNS_NS_DESC_LIST: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; From 782373ba27660ba7d330208cf5509ece6feb4545 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 2 Oct 2024 13:51:41 +0900 Subject: [PATCH 03/19] nvme: tcp: avoid race between queue_lock lock and destroy Commit 76d54bf20cdc ("nvme-tcp: don't access released socket during error recovery") added a mutex_lock() call for the queue->queue_lock in nvme_tcp_get_address(). However, the mutex_lock() races with mutex_destroy() in nvme_tcp_free_queue(), and causes the WARN below. DEBUG_LOCKS_WARN_ON(lock->magic != lock) WARNING: CPU: 3 PID: 34077 at kernel/locking/mutex.c:587 __mutex_lock+0xcf0/0x1220 Modules linked in: nvmet_tcp nvmet nvme_tcp nvme_fabrics iw_cm ib_cm ib_core pktcdvd nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables qrtr sunrpc ppdev 9pnet_virtio 9pnet pcspkr netfs parport_pc parport e1000 i2c_piix4 i2c_smbus loop fuse nfnetlink zram bochs drm_vram_helper drm_ttm_helper ttm drm_kms_helper xfs drm sym53c8xx floppy nvme scsi_transport_spi nvme_core nvme_auth serio_raw ata_generic pata_acpi dm_multipath qemu_fw_cfg [last unloaded: ib_uverbs] CPU: 3 UID: 0 PID: 34077 Comm: udisksd Not tainted 6.11.0-rc7 #319 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-2.fc40 04/01/2014 RIP: 0010:__mutex_lock+0xcf0/0x1220 Code: 08 84 d2 0f 85 c8 04 00 00 8b 15 ef b6 c8 01 85 d2 0f 85 78 f4 ff ff 48 c7 c6 20 93 ee af 48 c7 c7 60 91 ee af e8 f0 a7 6d fd <0f> 0b e9 5e f4 ff ff 48 b8 00 00 00 00 00 fc ff df 4c 89 f2 48 c1 RSP: 0018:ffff88811305f760 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffff88812c652058 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000001 RBP: ffff88811305f8b0 R08: 0000000000000001 R09: ffffed1075c36341 R10: ffff8883ae1b1a0b R11: 0000000000010498 R12: 0000000000000000 R13: 0000000000000000 R14: dffffc0000000000 R15: ffff88812c652058 FS: 00007f9713ae4980(0000) GS:ffff8883ae180000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fcd78483c7c CR3: 0000000122c38000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? __warn.cold+0x5b/0x1af ? __mutex_lock+0xcf0/0x1220 ? report_bug+0x1ec/0x390 ? handle_bug+0x3c/0x80 ? exc_invalid_op+0x13/0x40 ? asm_exc_invalid_op+0x16/0x20 ? 
__mutex_lock+0xcf0/0x1220 ? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] ? __pfx___mutex_lock+0x10/0x10 ? __lock_acquire+0xd6a/0x59e0 ? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] ? __pfx_nvme_tcp_get_address+0x10/0x10 [nvme_tcp] nvme_sysfs_show_address+0x81/0xc0 [nvme_core] dev_attr_show+0x42/0x80 ? __asan_memset+0x1f/0x40 sysfs_kf_seq_show+0x1f0/0x370 seq_read_iter+0x2cb/0x1130 ? rw_verify_area+0x3b1/0x590 ? __mutex_lock+0x433/0x1220 vfs_read+0x6a6/0xa20 ? lockdep_hardirqs_on+0x78/0x100 ? __pfx_vfs_read+0x10/0x10 ksys_read+0xf7/0x1d0 ? __pfx_ksys_read+0x10/0x10 ? __x64_sys_openat+0x105/0x1d0 do_syscall_64+0x93/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? __pfx_ksys_read+0x10/0x10 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? do_syscall_64+0x9f/0x180 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7f9713f55cfa Code: 55 48 89 e5 48 83 ec 20 48 89 55 e8 48 89 75 f0 89 7d f8 e8 e8 74 f8 ff 48 8b 55 e8 48 8b 75 f0 41 89 c0 8b 7d f8 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 2e 44 89 c7 48 89 45 f8 e8 42 75 f8 ff 48 8b RSP: 002b:00007ffd7f512e70 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 RAX: ffffffffffffffda RBX: 000055c38f316859 RCX: 00007f9713f55cfa RDX: 0000000000000fff RSI: 00007ffd7f512eb0 RDI: 0000000000000011 RBP: 00007ffd7f512e90 R08: 0000000000000000 R09: 00000000ffffffff R10: 0000000000000000 R11: 0000000000000246 R12: 000055c38f317148 R13: 0000000000000000 R14: 00007f96f4004f30 R15: 000055c3b6b623c0 The WARN is observed when the blktests test case nvme/014 is repeated with the tcp transport. It is rare; around 200 repetitions are required to recreate it in some test environments. To avoid the WARN, check the NVME_TCP_Q_LIVE flag before locking queue->queue_lock. The flag is cleared long before the lock is destroyed. Signed-off-by: Hannes Reinecke Signed-off-by: Shin'ichiro Kawasaki Signed-off-by: Keith Busch --- drivers/nvme/host/tcp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 89c44413c593..3e416af2659f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2644,10 +2644,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len = nvmf_get_address(ctrl, buf, size); + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return len; + mutex_lock(&queue->queue_lock); - if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) - goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) @@ -2655,7 +2656,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ?
"," : "", &src_addr); } -done: + mutex_unlock(&queue->queue_lock); return len;
From b402328a24ee7193a8ab84277c0c90ae16768126 Mon Sep 17 00:00:00 2001 From: SurajSonawane2415 Date: Mon, 7 Oct 2024 16:44:16 +0530 Subject: [PATCH 04/19] block: Fix elevator_get_default() checking for NULL q->tag_set elevator_get_default() and elv_support_iosched() both check whether or not q->tag_set is non-NULL; however, it's not possible for it to be NULL. This messes up some static checkers, as the checking of tag_set isn't consistent. Remove the checks, which both simplifies the logic and avoids checker errors. Signed-off-by: SurajSonawane2415 Link: https://lore.kernel.org/r/20241007111416.13814-1-surajsonawane0215@gmail.com Signed-off-by: Jens Axboe --- block/elevator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c index 4122026b11f1..60497e3b25b0 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -551,7 +551,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); static inline bool elv_support_iosched(struct request_queue *q) { if (!queue_is_mq(q) || - (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))) + (q->tag_set->flags & BLK_MQ_F_NO_SCHED)) return false; return true; } @@ -562,7 +562,7 @@ static inline bool elv_support_iosched(struct request_queue *q) */ static struct elevator_type *elevator_get_default(struct request_queue *q) { - if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) + if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) return NULL; if (q->nr_hw_queues != 1 &&
From 40f0e5dc2f3c866bef3fccf398114b90c6136dfa Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Tue, 8 Oct 2024 17:37:08 +0800 Subject: [PATCH 05/19] nvmet-rdma: use sbitmap to replace rsp free list We can use an sbitmap to manage all the nvmet_rdma_rsp structures instead of using a free list and spinlock, and we can use a special tag value to determine whether an nvmet_rdma_rsp was separately allocated (sketched below). In addition, performance has improved: 1. The testing environment is a local rxe RDMA device with a memory-based backstore device. 2. fio command, averaged over 5 runs: fio -filename=/dev/nvme0n1 --ioengine=libaio -direct=1 -size=1G -name=1 -thread -runtime=60 -time_based -rw=read -numjobs=16 -iodepth=128 -bs=4k -group_reporting 3. Before: 241k IOPS; after: 256k IOPS, an increase of about 5%.
Signed-off-by: Guixin Liu Reviewed-by: Jens Axboe --- drivers/nvme/target/rdma.c | 56 ++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 1b6264fa5803..001831452f0d 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -39,6 +39,8 @@ #define NVMET_RDMA_BACKLOG 128 +#define NVMET_RDMA_DISCRETE_RSP_TAG -1 + struct nvmet_rdma_srq; struct nvmet_rdma_cmd { @@ -75,7 +77,7 @@ struct nvmet_rdma_rsp { u32 invalidate_rkey; struct list_head wait_list; - struct list_head free_list; + int tag; }; enum nvmet_rdma_queue_state { @@ -98,8 +100,7 @@ struct nvmet_rdma_queue { struct nvmet_sq nvme_sq; struct nvmet_rdma_rsp *rsps; - struct list_head free_rsps; - spinlock_t rsps_lock; + struct sbitmap rsp_tags; struct nvmet_rdma_cmd *cmds; struct work_struct release_work; @@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r); + struct nvmet_rdma_rsp *r, + int tag); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -210,15 +212,12 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) static inline struct nvmet_rdma_rsp * nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) { - struct nvmet_rdma_rsp *rsp; - unsigned long flags; + struct nvmet_rdma_rsp *rsp = NULL; + int tag; - spin_lock_irqsave(&queue->rsps_lock, flags); - rsp = list_first_entry_or_null(&queue->free_rsps, - struct nvmet_rdma_rsp, free_list); - if (likely(rsp)) - list_del(&rsp->free_list); - spin_unlock_irqrestore(&queue->rsps_lock, flags); + tag = sbitmap_get(&queue->rsp_tags); + if (tag >= 0) + rsp = &queue->rsps[tag]; if (unlikely(!rsp)) { int ret; @@ -226,13 +225,12 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; - ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, + NVMET_RDMA_DISCRETE_RSP_TAG); if (unlikely(ret)) { kfree(rsp); return NULL; } - - rsp->allocated = true; } return rsp; @@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) static inline void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { - unsigned long flags; - - if (unlikely(rsp->allocated)) { + if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) { nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } - spin_lock_irqsave(&rsp->queue->rsps_lock, flags); - list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); - spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); + sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); } static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, @@ -404,7 +398,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, } static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r) + struct nvmet_rdma_rsp *r, int tag) { /* NVMe CQE / RDMA SEND */ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); @@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, r->read_cqe.done = nvmet_rdma_read_data_done; /* Data Out / RDMA WRITE */ r->write_cqe.done = nvmet_rdma_write_data_done; + r->tag = tag; return 0; @@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int nr_rsps = 
queue->recv_queue_size * 2; - int ret = -EINVAL, i; + int ret = -ENOMEM, i; + + if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, + NUMA_NO_NODE, false, true)) + goto out; queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), GFP_KERNEL); if (!queue->rsps) - goto out; + goto out_free_sbitmap; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; - ret = nvmet_rdma_alloc_rsp(ndev, rsp); + ret = nvmet_rdma_alloc_rsp(ndev, rsp, i); if (ret) goto out_free; - - list_add_tail(&rsp->free_list, &queue->free_rsps); } return 0; @@ -477,6 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) while (--i >= 0) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); +out_free_sbitmap: + sbitmap_free(&queue->rsp_tags); out: return ret; } @@ -489,6 +488,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) for (i = 0; i < nr_rsps; i++) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); + sbitmap_free(&queue->rsp_tags); } static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, @@ -1447,8 +1447,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, INIT_LIST_HEAD(&queue->rsp_wait_list); INIT_LIST_HEAD(&queue->rsp_wr_wait_list); spin_lock_init(&queue->rsp_wr_wait_lock); - INIT_LIST_HEAD(&queue->free_rsps); - spin_lock_init(&queue->rsps_lock); INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); From 9c7072df535bd7db475d32c7da76c7d241c08ab1 Mon Sep 17 00:00:00 2001 From: Tokunori Ikegami Date: Sat, 5 Oct 2024 20:30:22 +0900 Subject: [PATCH 06/19] nvme: delete unnecessary fallthru comment Signed-off-by: Tokunori Ikegami Signed-off-by: Keith Busch --- drivers/nvme/host/multipath.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 48e7a8906d01..bad1620fbbfc 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -431,7 +431,6 @@ static bool nvme_available_path(struct nvme_ns_head *head) case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: - /* fallthru */ return true; default: break; From 0ce96a6708f34280a536263ee5c67e20c433dcce Mon Sep 17 00:00:00 2001 From: Greg Joyce Date: Mon, 7 Oct 2024 14:33:24 -0500 Subject: [PATCH 07/19] nvme: disable CC.CRIME (NVME_CC_CRIME) Disable NVME_CC_CRIME so that CSTS.RDY indicates that the media is ready and able to handle commands without returning NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Signed-off-by: Greg Joyce Reviewed-by: Nilay Shroff Tested-by: Nilay Shroff Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ba6508455e18..9e7e63e10e5a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2458,8 +2458,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) - ctrl->ctrl_config |= NVME_CC_CRIME; + /* + * Setting CRIME results in CSTS.RDY before the media is ready. This + * makes it possible for media related commands to return the error + * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is + * restructured to handle retries, disable CC.CRIME. 
+ */ + ctrl->ctrl_config &= ~NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2489,10 +2494,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) * devices are known to get this wrong. Use the larger of the * two values. */ - if (ctrl->ctrl_config & NVME_CC_CRIME) - ready_timeout = NVME_CRTO_CRIMT(crto); - else - ready_timeout = NVME_CRTO_CRWMT(crto); + ready_timeout = NVME_CRTO_CRWMT(crto); if (ready_timeout < timeout) dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
From 1e3fc2000035ffea0b1b7ec2423706715ab2e7f6 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 10 Oct 2024 21:44:26 +0100 Subject: [PATCH 08/19] drbd: Remove unused conn_lowest_minor conn_lowest_minor()'s last use was removed by the 2011 commit 69a227731a37 ("drbd: Pass a peer device to a number of fuctions"). Remove it. Signed-off-by: Dr. David Alan Gilbert Link: https://lore.kernel.org/r/20241010204426.277535-1-linux@treblig.org Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_int.h | 1 - drivers/block/drbd/drbd_main.c | 14 -------------- 2 files changed, 15 deletions(-)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 2a05d955e30b..e21492981f7d 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1364,7 +1364,6 @@ extern struct bio_set drbd_io_bio_set; extern struct mutex resources_mutex; -extern int conn_lowest_minor(struct drbd_connection *connection); extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); extern void drbd_destroy_device(struct kref *kref); extern void drbd_delete_device(struct drbd_device *device); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 0d74d75260ef..5bbd312c3e14 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -471,20 +471,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait) wait_for_completion(&thi->stop); } -int conn_lowest_minor(struct drbd_connection *connection) -{ - struct drbd_peer_device *peer_device; - int vnr = 0, minor = -1; - - rcu_read_lock(); - peer_device = idr_get_next(&connection->peer_devices, &vnr); - if (peer_device) - minor = device_to_minor(peer_device->device); - rcu_read_unlock(); - - return minor; -} - #ifdef CONFIG_SMP /* * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
From b4ff6e93bfd0093ce3ffc7322e89fbaa8300488f Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 11 Oct 2024 10:01:21 -0700 Subject: [PATCH 09/19] elevator: do not request_module if elevator exists Whenever an I/O elevator is changed, the system attempts to load a module for the new elevator. This occurs regardless of whether the elevator is already loaded or built directly into the kernel. This behavior introduces unnecessary overhead and potential issues: it makes the operation slower and more error-prone. For instance, it makes the problem fixed by [1] visible even for users that don't rely on modules at all. Do not try to load the ioscheduler module if the elevator is already registered. This change brings two main benefits: it improves the performance of elevator changes, and it reduces the likelihood of errors occurring during this process.
[1] Commit e3accac1a976 ("block: Fix elv_iosched_local_module handling of "none" scheduler") Fixes: 734e1a860312 ("block: Prevent deadlocks when switching elevators") Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20241011170122.3880087-1-leitao@debian.org Signed-off-by: Jens Axboe --- block/elevator.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/block/elevator.c b/block/elevator.c index 60497e3b25b0..eb624f8ae99d 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -709,13 +709,21 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf, size_t count) { char elevator_name[ELV_NAME_MAX]; + struct elevator_type *found; + const char *name; if (!elv_support_iosched(disk->queue)) return -EOPNOTSUPP; strscpy(elevator_name, buf, sizeof(elevator_name)); + name = strstrip(elevator_name); - request_module("%s-iosched", strstrip(elevator_name)); + spin_lock(&elv_list_lock); + found = __elevator_find(name); + spin_unlock(&elv_list_lock); + + if (!found) + request_module("%s-iosched", name); return 0; }
From ee7ff15bf507d4cf9a2b11b00690dfe6046ad325 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 11 Oct 2024 08:56:15 -0700 Subject: [PATCH 10/19] elevator: Remove argument from elevator_find_get Commit e4eb37cc0f3e ("block: Remove elevator required features") removed the usage of `struct request_queue` from elevator_find_get(), but didn't remove the argument. Remove the "struct request_queue *q" argument from elevator_find_get() given it is unused. Fixes: e4eb37cc0f3e ("block: Remove elevator required features") Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20241011155615.3361143-1-leitao@debian.org Signed-off-by: Jens Axboe --- block/elevator.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c index eb624f8ae99d..9430cde13d1a 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -106,8 +106,7 @@ static struct elevator_type *__elevator_find(const char *name) return NULL; } -static struct elevator_type *elevator_find_get(struct request_queue *q, - const char *name) +static struct elevator_type *elevator_find_get(const char *name) { struct elevator_type *e; @@ -569,7 +568,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) !blk_mq_is_shared_tags(q->tag_set->flags)) return NULL; - return elevator_find_get(q, "mq-deadline"); + return elevator_find_get("mq-deadline"); } /* @@ -697,7 +696,7 @@ static int elevator_change(struct request_queue *q, const char *elevator_name) if (q->elevator && elevator_match(q->elevator->type, elevator_name)) return 0; - e = elevator_find_get(q, elevator_name); + e = elevator_find_get(elevator_name); if (!e) return -EINVAL; ret = elevator_switch(q, e);
From c25c0c9035bb8b28c844dfddeda7b8bdbcfcae95 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 14 Oct 2024 08:51:15 +0800 Subject: [PATCH 11/19] blk-mq: setup queue ->tag_set before initializing hctx Commit 7b815817aa58 ("blk-mq: add helper for checking if one CPU is mapped to specified hctx") needs to check queue mapping via tag set in hctx's cpuhp handler. However, q->tag_set may not be set up yet when the cpuhp handler is enabled, and then a kernel oops is triggered. Fix the issue by setting up the queue's tag_set before initializing hctx.
Cc: stable@vger.kernel.org Reported-and-tested-by: Rick Koch Closes: https://lore.kernel.org/linux-block/CANa58eeNDozLaBHKPLxSAhEy__FPfJT_F71W=sEQw49UCrC9PQ@mail.gmail.com Fixes: 7b815817aa58 ("blk-mq: add helper for checking if one CPU is mapped to specified hctx") Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Reviewed-by: John Garry Link: https://lore.kernel.org/r/20241014005115.2699642-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4b2c8e940f59..cf626e061dd7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4310,6 +4310,12 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, /* mark the queue as mq asap */ q->mq_ops = set->ops; + /* + * ->tag_set has to be set up before initializing hctx, as the + * cpuhp handler needs it for checking queue mapping + */ + q->tag_set = set; + if (blk_mq_alloc_ctxs(q)) goto err_exit; @@ -4328,8 +4334,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, INIT_WORK(&q->timeout_work, blk_mq_timeout_work); blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); - q->tag_set = set; - q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
From 1f021341eef41e77a633186e9be5223de2ce5d48 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 15 Oct 2024 07:30:17 -0700 Subject: [PATCH 12/19] nvme-multipath: defer partition scanning We need to suppress the partition scan from occurring within the controller's scan_work context. If a path error occurs here, the IO will wait until a path becomes available or all paths are torn down, but that action also occurs within scan_work, so it would deadlock. Defer the partition scan to a different context that does not block scan_work. Reported-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/multipath.c | 39 +++++++++++++++++++++++++++++------ drivers/nvme/host/nvme.h | 1 + 2 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index bad1620fbbfc..6a15873055b9 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -579,6 +579,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) return ret; } +static void nvme_partition_scan_work(struct work_struct *work) +{ + struct nvme_ns_head *head = + container_of(work, struct nvme_ns_head, partition_scan_work); + + if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN, + &head->disk->state))) + return; + + mutex_lock(&head->disk->open_mutex); + bdev_disk_changed(head->disk, false); + mutex_unlock(&head->disk->open_mutex); +} + static void nvme_requeue_work(struct work_struct *work) { struct nvme_ns_head *head = @@ -605,6 +619,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) bio_list_init(&head->requeue_list); spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); + INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); /* * Add a multipath node if the subsystems supports multiple controllers. @@ -628,6 +643,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) return PTR_ERR(head->disk); head->disk->fops = &nvme_ns_head_ops; head->disk->private_data = head; + + /* + * We need to suppress the partition scan from occurring within the + * controller's scan_work context.
If a path error occurs here, the IO + * will wait until a path becomes available or all paths are torn down, + * but that action also occurs within scan_work, so it would deadlock. + * Defer the partition scan to a different context that does not block + * scan_work. + */ + set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); return 0; @@ -654,6 +679,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) return; } nvme_add_ns_head_cdev(head); + kblockd_schedule_work(&head->partition_scan_work); } mutex_lock(&head->lock); @@ -973,14 +999,14 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) return; if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { nvme_cdev_del(&head->cdev, &head->cdev_device); + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. + */ + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); del_gendisk(head->disk); } - /* - * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared - * to allow multipath to fail all I/O. - */ - synchronize_srcu(&head->srcu); - kblockd_schedule_work(&head->requeue_work); } void nvme_mpath_remove_disk(struct nvme_ns_head *head) @@ -990,6 +1016,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); + flush_work(&head->partition_scan_work); put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 313a4f978a2c..093cb423f536 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -494,6 +494,7 @@ struct nvme_ns_head { struct bio_list requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; + struct work_struct partition_scan_work; struct mutex lock; unsigned long flags; #define NVME_NSHEAD_DISK_LIVE 0
From e972b08b91ef48488bae9789f03cfedb148667fb Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Tue, 15 Oct 2024 10:59:46 -0700 Subject: [PATCH 13/19] blk-rq-qos: fix crash on rq_qos_wait vs.
rq_qos_wake_function race We're seeing crashes from rq_qos_wake_function that look like this: BUG: unable to handle page fault for address: ffffafe180a40084 #PF: supervisor write access in kernel mode #PF: error_code(0x0002) - not-present page PGD 100000067 P4D 100000067 PUD 10027c067 PMD 10115d067 PTE 0 Oops: Oops: 0002 [#1] PREEMPT SMP PTI CPU: 17 UID: 0 PID: 0 Comm: swapper/17 Not tainted 6.12.0-rc3-00013-geca631b8fe80 #11 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 RIP: 0010:_raw_spin_lock_irqsave+0x1d/0x40 Code: 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 0f 1f 44 00 00 41 54 9c 41 5c fa 65 ff 05 62 97 30 4c 31 c0 ba 01 00 00 00 0f b1 17 75 0a 4c 89 e0 41 5c c3 cc cc cc cc 89 c6 e8 2c 0b 00 RSP: 0018:ffffafe180580ca0 EFLAGS: 00010046 RAX: 0000000000000000 RBX: ffffafe180a3f7a8 RCX: 0000000000000011 RDX: 0000000000000001 RSI: 0000000000000003 RDI: ffffafe180a40084 RBP: 0000000000000000 R08: 00000000001e7240 R09: 0000000000000011 R10: 0000000000000028 R11: 0000000000000888 R12: 0000000000000002 R13: ffffafe180a40084 R14: 0000000000000000 R15: 0000000000000003 FS: 0000000000000000(0000) GS:ffff9aaf1f280000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffafe180a40084 CR3: 000000010e428002 CR4: 0000000000770ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: try_to_wake_up+0x5a/0x6a0 rq_qos_wake_function+0x71/0x80 __wake_up_common+0x75/0xa0 __wake_up+0x36/0x60 scale_up.part.0+0x50/0x110 wb_timer_fn+0x227/0x450 ... So rq_qos_wake_function() calls wake_up_process(data->task), which calls try_to_wake_up(), which faults in raw_spin_lock_irqsave(&p->pi_lock). p comes from data->task, and data comes from the waitqueue entry, which is stored on the waiter's stack in rq_qos_wait(). Analyzing the core dump with drgn, I found that the waiter had already woken up and moved on to a completely unrelated code path, clobbering what was previously data->task. Meanwhile, the waker was passing the clobbered garbage in data->task to wake_up_process(), leading to the crash. What's happening is that in between rq_qos_wake_function() deleting the waitqueue entry and calling wake_up_process(), rq_qos_wait() is finding that it already got a token and returning. The race looks like this: rq_qos_wait() rq_qos_wake_function() ============================================================== prepare_to_wait_exclusive() data->got_token = true; list_del_init(&curr->entry); if (data.got_token) break; finish_wait(&rqw->wait, &data.wq); ^- returns immediately because list_empty_careful(&wq_entry->entry) is true ... return, go do something else ... wake_up_process(data->task) (NO LONGER VALID!)-^ Normally, finish_wait() is supposed to synchronize against the waker. But, as noted above, it is returning immediately because the waitqueue entry has already been removed from the waitqueue. The bug is that rq_qos_wake_function() is accessing the waitqueue entry AFTER deleting it. Note that autoremove_wake_function() wakes the waiter and THEN deletes the waitqueue entry, which is the proper order. Fix it by swapping the order. We also need to use list_del_init_careful() to match the list_empty_careful() in finish_wait(). 
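Condensed, the fixed wake function looks like this (a sketch of the ordering only, mirroring the one-line swap in the diff below; struct rq_qos_wait_data is the private wait state from block/blk-rq-qos.c, and inflight accounting is elided):

	static int rq_qos_wake_function_sketch(struct wait_queue_entry *curr,
					       unsigned mode, int wake_flags,
					       void *key)
	{
		struct rq_qos_wait_data *data =
			container_of(curr, struct rq_qos_wait_data, wq);

		data->got_token = true;
		smp_wmb();
		/*
		 * Wake the task BEFORE detaching the entry: once the entry
		 * is removed, finish_wait() no longer synchronizes and the
		 * waiter may return and reuse its stack, so data must not
		 * be touched afterwards.
		 */
		wake_up_process(data->task);
		list_del_init_careful(&curr->entry);
		return 1;
	}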
Fixes: 38cfb5a45ee0 ("blk-wbt: improve waking of tasks") Cc: stable@vger.kernel.org Signed-off-by: Omar Sandoval Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/d3bee2463a67b1ee597211823bf7ad3721c26e41.1729014591.git.osandov@fb.com Signed-off-by: Jens Axboe --- block/blk-rq-qos.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 2cfb297d9a62..058f92c4f9d5 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -219,8 +219,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr, data->got_token = true; smp_wmb(); - list_del_init(&curr->entry); wake_up_process(data->task); + list_del_init_careful(&curr->entry); return 1; }
From 42aafd8b48adac1c3b20fe5892b1b91b80c1a1e6 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 16 Oct 2024 21:48:47 +0800 Subject: [PATCH 14/19] ublk: don't allow user copy for unprivileged device UBLK_F_USER_COPY requires userspace to call write() on the ublk char device to fill the request buffer, and an unprivileged device can't be trusted. So don't allow user copy for unprivileged devices. Cc: stable@vger.kernel.org Fixes: 1172d5b8beca ("ublk: support user copy") Signed-off-by: Ming Lei Link: https://lore.kernel.org/r/20241016134847.2911721-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/block/ublk_drv.c | 11 ++++++++++- include/uapi/linux/ublk_cmd.h | 8 +++++++- 2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index a6c8e5cc6051..6ba2c1dd1d87 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2380,10 +2380,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) * TODO: provide forward progress for RECOVERY handler, so that * unprivileged device can benefit from it */ - if (info.flags & UBLK_F_UNPRIVILEGED_DEV) + if (info.flags & UBLK_F_UNPRIVILEGED_DEV) { info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE | UBLK_F_USER_RECOVERY); + /* + * For USER_COPY, we depend on userspace to fill the request + * buffer by pwrite() to the ublk char device, which can't be + * used for an unprivileged device + */ + if (info.flags & UBLK_F_USER_COPY) + return -EINVAL; + } + /* the created device is always owned by current user */ ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid); diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h index c8dc5f8ea699..12873639ea96 100644 --- a/include/uapi/linux/ublk_cmd.h +++ b/include/uapi/linux/ublk_cmd.h @@ -175,7 +175,13 @@ /* use ioctl encoding for uring command */ #define UBLK_F_CMD_IOCTL_ENCODE (1UL << 6) -/* Copy between request and user buffer by pread()/pwrite() */ +/* + * Copy between request and user buffer by pread()/pwrite() + * + * Not available for UBLK_F_UNPRIVILEGED_DEV, otherwise userspace may + * deceive us by not filling the request buffer, and uninitialized + * kernel data may be leaked. + */ #define UBLK_F_USER_COPY (1UL << 7) /*
From 26bc0a81f64ce00fc4342c38eeb2eddaad084dd2 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Tue, 15 Oct 2024 13:21:00 +0200 Subject: [PATCH 15/19] nvme-pci: fix race condition between reset and nvme_dev_disable() nvme_dev_disable() modifies the dev->online_queues field; therefore nvme_pci_update_nr_queues() should avoid racing against it, otherwise we could end up passing invalid values to blk_mq_update_nr_hw_queues().
WARNING: CPU: 39 PID: 61303 at drivers/pci/msi/api.c:347 pci_irq_get_affinity+0x187/0x210 Workqueue: nvme-reset-wq nvme_reset_work [nvme] RIP: 0010:pci_irq_get_affinity+0x187/0x210 Call Trace: ? blk_mq_pci_map_queues+0x87/0x3c0 ? pci_irq_get_affinity+0x187/0x210 blk_mq_pci_map_queues+0x87/0x3c0 nvme_pci_map_queues+0x189/0x460 [nvme] blk_mq_update_nr_hw_queues+0x2a/0x40 nvme_reset_work+0x1be/0x2a0 [nvme] Fix the bug by locking the shutdown_lock mutex before using dev->online_queues. Give up if nvme_dev_disable() is running or if it has been executed already. Fixes: 949928c1c731 ("NVMe: Fix possible queue use after freed") Tested-by: Yi Zhang Reviewed-by: Christoph Hellwig Signed-off-by: Maurizio Lombardi Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7990c3f22ecf..4b9fda0b1d9a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2506,17 +2506,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) return 1; } -static void nvme_pci_update_nr_queues(struct nvme_dev *dev) +static bool nvme_pci_update_nr_queues(struct nvme_dev *dev) { if (!dev->ctrl.tagset) { nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); - return; + return true; + } + + /* Give up if we are racing with nvme_dev_disable() */ + if (!mutex_trylock(&dev->shutdown_lock)) + return false; + + /* Check if nvme_dev_disable() has been executed already */ + if (!dev->online_queues) { + mutex_unlock(&dev->shutdown_lock); + return false; } blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); /* free previously allocated queues that are no longer usable */ nvme_free_queues(dev, dev->online_queues); + mutex_unlock(&dev->shutdown_lock); + return true; } static int nvme_pci_enable(struct nvme_dev *dev) @@ -2797,7 +2809,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_dbbuf_set(dev); nvme_unquiesce_io_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); - nvme_pci_update_nr_queues(dev); + if (!nvme_pci_update_nr_queues(dev)) + goto out; nvme_unfreeze(&dev->ctrl); } else { dev_warn(dev->ctrl.device, "IO queues lost\n");
From c199fac88fe7c749f88a0653e9f621b9f5a71cf1 Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Wed, 16 Oct 2024 08:33:14 +0530 Subject: [PATCH 16/19] nvme-loop: flush off pending I/O while shutting down loop controller While shutting down the loop controller, we first quiesce the admin/IO queue, delete the admin/IO tag-set, and then finally destroy the admin/IO queue. However, it's quite possible that during the window between quiescing and destroying the admin/IO queue, some admin/IO request sneaks in; if that happens, we could encounter a hung task because the shutdown operation can't make forward progress until all pending I/O is flushed off. This commit helps ensure that before destroying the admin/IO queue, we unquiesce the admin/IO queue so that any outstanding requests, which were added after the admin/IO queue was quiesced, are flushed to completion. The resulting teardown order is sketched below.
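A condensed sketch of the admin-queue teardown order (hypothetical helper name; the real change is in the diff that follows):

	static void nvme_loop_destroy_admin_queue_sketch(struct nvme_loop_ctrl *ctrl)
	{
		if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
			return;
		/*
		 * Unquiesce before destroying: requests queued after the
		 * quiesce must be flushed to completion first, otherwise
		 * the teardown below could wait on them forever (hung task).
		 */
		nvme_unquiesce_admin_queue(&ctrl->ctrl);
		nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
		nvme_remove_admin_tag_set(&ctrl->ctrl);
	}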
Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch --- drivers/nvme/target/loop.c | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index e32790d8fc26..a9d112d34d4f 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; + /* + * It's possible that some requests might have been added + * after the admin queue was stopped/quiesced. So now start + * the queue to flush these requests to completion. + */ + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } ctrl->ctrl.queue_count = 1; + /* + * It's possible that some requests might have been added + * after the io queue was stopped/quiesced. So now start + * the queue to flush these requests to completion. + */ + nvme_unquiesce_io_queues(&ctrl->ctrl); } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
From d06923670b5a5f609603d4a9fee4dec02d38de9c Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Wed, 16 Oct 2024 08:33:15 +0530 Subject: [PATCH 17/19] nvme: make keep-alive synchronous operation The nvme keep-alive operation, which executes at a periodic interval, could potentially sneak in while shutting down a fabric controller. This may lead to a race between the fabric controller admin queue destroy code path (invoked while shutting down the controller) and the hw/hctx queue dispatcher called from the nvme keep-alive async request queuing operation. This race could lead to the kernel crash shown below: Call Trace: autoremove_wake_function+0x0/0xbc (unreliable) __blk_mq_sched_dispatch_requests+0x114/0x24c blk_mq_sched_dispatch_requests+0x44/0x84 blk_mq_run_hw_queue+0x140/0x220 nvme_keep_alive_work+0xc8/0x19c [nvme_core] process_one_work+0x200/0x4e0 worker_thread+0x340/0x504 kthread+0x138/0x140 start_kernel_thread+0x14/0x18 While shutting down the fabric controller, if an nvme keep-alive request sneaks in, it is flushed off. The nvme_keep_alive_end_io() function is then invoked to handle the end of the keep-alive operation; it decrements admin->q_usage_counter, and assuming this is the last/only request in the admin queue, admin->q_usage_counter becomes zero. If that happens, the blk-mq destroy queue operation (blk_mq_destroy_queue()), which could potentially be running simultaneously on another CPU (as this is the controller shutdown code path), would make forward progress and delete the admin queue. From this point onward we are not supposed to access the admin queue resources. However, the issue here is that the nvme keep-alive thread running the hw/hctx queue dispatch operation hasn't yet finished its work, so it could still access the admin queue resources after the admin queue has already been deleted, and that causes the above crash. This fix helps avoid the observed crash by implementing keep-alive as a synchronous operation, so that we decrement admin->q_usage_counter only after the keep-alive command has finished its execution and returned the command status back up to its caller (blk_execute_rq()).
This ensures that the fabric shutdown code path doesn't destroy the fabric admin queue until the keep-alive request has finished execution, and also that the keep-alive thread is no longer running the hw/hctx queue dispatch operation. Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9e7e63e10e5a..60163840b8ff 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1292,10 +1292,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); } -static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, - blk_status_t status) +static void nvme_keep_alive_finish(struct request *rq, + blk_status_t status, struct nvme_ctrl *ctrl) { - struct nvme_ctrl *ctrl = rq->end_io_data; unsigned long flags; bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); @@ -1313,13 +1312,11 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, delay = 0; } - blk_mq_free_request(rq); - if (status) { dev_err(ctrl->device, "failed nvme_keep_alive_end_io error=%d\n", status); - return RQ_END_IO_NONE; + return; } ctrl->ka_last_check_time = jiffies; @@ -1331,7 +1328,6 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); - return RQ_END_IO_NONE; } static void nvme_keep_alive_work(struct work_struct *work) @@ -1340,6 +1336,7 @@ static void nvme_keep_alive_work(struct work_struct *work) struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; struct request *rq; + blk_status_t status; ctrl->ka_last_check_time = jiffies; @@ -1362,9 +1359,9 @@ static void nvme_keep_alive_work(struct work_struct *work) nvme_init_request(rq, &ctrl->ka_cmd); rq->timeout = ctrl->kato * HZ; - rq->end_io = nvme_keep_alive_end_io; - rq->end_io_data = ctrl; - blk_execute_rq_nowait(rq, false); + status = blk_execute_rq(rq, false); + nvme_keep_alive_finish(rq, status, ctrl); + blk_mq_free_request(rq); } static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
From 599d9f3a10eec69ef28a90161763e4bd7c9c02bf Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Wed, 16 Oct 2024 08:33:16 +0530 Subject: [PATCH 18/19] nvme: use helper nvme_ctrl_state in nvme_keep_alive_finish function We no longer need to acquire ctrl->lock before accessing the NVMe controller state; instead we can now use the helper nvme_ctrl_state(). So replace the use of ctrl->lock in the nvme_keep_alive_finish() function with an nvme_ctrl_state() call. The helper is shown below for reference.
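For context, the helper is essentially a lockless read of the controller state (a sketch matching its definition in drivers/nvme/host/nvme.h):

	static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
	{
		/* Single racy-but-consistent snapshot; no ctrl->lock needed. */
		return READ_ONCE(ctrl->state);
	}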
Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 60163840b8ff..876c8e6311db 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1295,10 +1295,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) static void nvme_keep_alive_finish(struct request *rq, blk_status_t status, struct nvme_ctrl *ctrl) { - unsigned long flags; - bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); unsigned long delay = nvme_keep_alive_work_period(ctrl); + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); /* * Subtract off the keepalive RTT so nvme_keep_alive_work runs @@ -1321,12 +1320,7 @@ static void nvme_keep_alive_finish(struct request *rq, ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->state == NVME_CTRL_LIVE || - ctrl->state == NVME_CTRL_CONNECTING) - startka = true; - spin_unlock_irqrestore(&ctrl->lock, flags); - if (startka) + if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); } From b0bf1afde7c34698cf61422fa8ee60e690dc25c3 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 17 Oct 2024 15:09:02 -0700 Subject: [PATCH 19/19] cdrom: Avoid barrier_nospec() in cdrom_ioctl_media_changed() The barrier_nospec() after the array bounds check is overkill and painfully slow for arches which implement it. Furthermore, most arches don't implement it, so they remain exposed to Spectre v1 (which can affect pretty much any CPU with branch prediction). Instead, clamp the user pointer to a valid range so it's guaranteed to be a valid array index even when the bounds check mispredicts. Fixes: 8270cb10c068 ("cdrom: Fix spectre-v1 gadget") Signed-off-by: Josh Poimboeuf Link: https://lore.kernel.org/r/1d86f4d9d8fba68e5ca64cdeac2451b95a8bf872.1729202937.git.jpoimboe@kernel.org Signed-off-by: Jens Axboe --- drivers/cdrom/cdrom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 9b0f37d4b9d4..6a99a459b80b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, return -EINVAL; /* Prevent arg from speculatively bypassing the length check */ - barrier_nospec(); + arg = array_index_nospec(arg, cdi->capacity); info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info)