Merge tag 'block-6.13-20241207' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - Target fix for using an incorrect zero buffer (Nilay)
      - Device-specific deallocate quirk fixes (Christoph, Keith)
      - Fabrics fix for targets that report a zero MAXCMD (Maurizio)
      - Coccinelle-driven kmalloc + memset to kzalloc conversion (Yu-Chen)
      - DMA size fix for the host memory buffer feature (Christoph)
      - Fabrics queue cleanup fixes (Chunguang)

 - CPU hotplug ordering fixes

 - Add missing MODULE_DESCRIPTION for rnull

 - bcache error value fix

 - virtio-blk queue freeze fix

* tag 'block-6.13-20241207' of git://git.kernel.dk/linux:
  blk-mq: move cpuhp callback registering out of q->sysfs_lock
  blk-mq: register cpuhp callback after hctx is added to xarray table
  virtio-blk: don't keep queue frozen during system suspend
  nvme-tcp: simplify nvme_tcp_teardown_io_queues()
  nvme-tcp: no need to quiesce admin_q in nvme_tcp_teardown_io_queues()
  nvme-rdma: unquiesce admin_q before destroy it
  nvme-tcp: fix the memleak while create new ctrl failed
  nvme-pci: don't use dma_alloc_noncontiguous with 0 merge boundary
  nvmet: replace kmalloc + memset with kzalloc for data allocation
  nvme-fabrics: handle zero MAXCMD without closing the connection
  bcache: revert replacing IS_ERR_OR_NULL with IS_ERR again
  nvme-pci: remove two deallocate zeroes quirks
  block: rnull: add missing MODULE_DESCRIPTION
  nvme: don't apply NVME_QUIRK_DEALLOCATE_ZEROES when DSM is not supported
  nvmet: use kzalloc instead of ZERO_PAGE in nvme_execute_identify_ns_nvm()
Linus Torvalds, 2024-12-07 10:07:05 -08:00
commit 7503345ac5
10 changed files with 123 additions and 47 deletions

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -43,6 +43,7 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
+static DEFINE_MUTEX(blk_mq_cpuhp_lock);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -3739,13 +3740,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
-static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-	if (!(hctx->flags & BLK_MQ_F_STACKING))
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+	    !hlist_unhashed(&hctx->cpuhp_online)) {
 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
 						    &hctx->cpuhp_online);
-	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
-					    &hctx->cpuhp_dead);
+		INIT_HLIST_NODE(&hctx->cpuhp_online);
+	}
+
+	if (!hlist_unhashed(&hctx->cpuhp_dead)) {
+		cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+						    &hctx->cpuhp_dead);
+		INIT_HLIST_NODE(&hctx->cpuhp_dead);
+	}
+}
+
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+	mutex_lock(&blk_mq_cpuhp_lock);
+	__blk_mq_remove_cpuhp(hctx);
+	mutex_unlock(&blk_mq_cpuhp_lock);
+}
+
+static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+	    hlist_unhashed(&hctx->cpuhp_online))
+		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+				&hctx->cpuhp_online);
+
+	if (hlist_unhashed(&hctx->cpuhp_dead))
+		cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+				&hctx->cpuhp_dead);
+}
+
+static void __blk_mq_remove_cpuhp_list(struct list_head *head)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	list_for_each_entry(hctx, head, hctx_list)
+		__blk_mq_remove_cpuhp(hctx);
+}
+
+/*
+ * Unregister cpuhp callbacks from exited hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
+{
+	LIST_HEAD(hctx_list);
+
+	spin_lock(&q->unused_hctx_lock);
+	list_splice_init(&q->unused_hctx_list, &hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
+
+	mutex_lock(&blk_mq_cpuhp_lock);
+	__blk_mq_remove_cpuhp_list(&hctx_list);
+	mutex_unlock(&blk_mq_cpuhp_lock);
+
+	spin_lock(&q->unused_hctx_lock);
+	list_splice(&hctx_list, &q->unused_hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
+}
+
+/*
+ * Register cpuhp callbacks from all hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+
+	mutex_lock(&blk_mq_cpuhp_lock);
+	queue_for_each_hw_ctx(q, hctx, i)
+		__blk_mq_add_cpuhp(hctx);
+	mutex_unlock(&blk_mq_cpuhp_lock);
+}
 
 /*
@@ -3796,8 +3875,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
-	blk_mq_remove_cpuhp(hctx);
-
 	xa_erase(&q->hctx_table, hctx_idx);
 
 	spin_lock(&q->unused_hctx_lock);
@@ -3814,6 +3891,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (i == nr_queue)
 			break;
+		blk_mq_remove_cpuhp(hctx);
 		blk_mq_exit_hctx(q, set, hctx, i);
 	}
 }
@@ -3824,16 +3902,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
 {
 	hctx->queue_num = hctx_idx;
 
-	if (!(hctx->flags & BLK_MQ_F_STACKING))
-		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
-				&hctx->cpuhp_online);
-	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
 	hctx->tags = set->tags[hctx_idx];
 
 	if (set->ops->init_hctx &&
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
-		goto unregister_cpu_notifier;
+		goto fail;
 
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
 				hctx->numa_node))
@@ -3850,8 +3923,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
-unregister_cpu_notifier:
-	blk_mq_remove_cpuhp(hctx);
+fail:
 	return -1;
 }
@@ -3877,6 +3949,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
+	INIT_HLIST_NODE(&hctx->cpuhp_dead);
+	INIT_HLIST_NODE(&hctx->cpuhp_online);
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
@@ -4415,6 +4489,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		xa_for_each_start(&q->hctx_table, j, hctx, j)
 			blk_mq_exit_hctx(q, set, hctx, j);
 	mutex_unlock(&q->sysfs_lock);
+
+	/* unregister cpuhp callbacks for exited hctxs */
+	blk_mq_remove_hw_queues_cpuhp(q);
+
+	/* register cpuhp for new initialized hctxs */
+	blk_mq_add_hw_queues_cpuhp(q);
 }
 
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
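
The add/remove helpers above are idempotent by design: each hctx's hlist
node doubles as a "registered" flag, because hlist_unhashed() is true
exactly while the node is parked by INIT_HLIST_NODE(). That is what lets
blk_mq_remove_cpuhp() be called from both the exit path and the
unused-hctx path without double-unregistering. Below is a user-space
sketch of the idiom; the registry and the reg_add()/reg_del() names are
hypothetical, with plain C stand-ins for the kernel's hlist helpers.

#include <stdbool.h>
#include <stdio.h>

/* pprev == NULL plays the role of hlist_unhashed() */
struct hnode {
	struct hnode *next;
	struct hnode **pprev;
};

static void init_hnode(struct hnode *n) { n->next = NULL; n->pprev = NULL; }
static bool unhashed(const struct hnode *n) { return n->pprev == NULL; }

static struct hnode *registry;	/* hypothetical callback registry head */

static void reg_add(struct hnode *n)
{
	if (!unhashed(n))	/* a second add is a no-op */
		return;
	n->next = registry;
	if (registry)
		registry->pprev = &n->next;
	n->pprev = &registry;
	registry = n;
}

static void reg_del(struct hnode *n)
{
	if (unhashed(n))	/* a second del is a no-op */
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	init_hnode(n);		/* re-park, as __blk_mq_remove_cpuhp() does */
}

int main(void)
{
	struct hnode n;

	init_hnode(&n);		/* INIT_HLIST_NODE() in blk_mq_alloc_hctx() */
	reg_add(&n);
	reg_add(&n);		/* ignored */
	reg_del(&n);
	reg_del(&n);		/* ignored */
	printf("registered at exit: %d\n", !unhashed(&n));	/* 0 */
	return 0;
}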

diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs

@@ -28,6 +28,7 @@
     type: NullBlkModule,
     name: "rnull_mod",
     author: "Andreas Hindborg",
+    description: "Rust implementation of the C null block driver",
     license: "GPL v2",
 }

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c

@@ -1586,9 +1586,12 @@ static void virtblk_remove(struct virtio_device *vdev)
 static int virtblk_freeze(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
+	struct request_queue *q = vblk->disk->queue;
 
 	/* Ensure no requests in virtqueues before deleting vqs. */
-	blk_mq_freeze_queue(vblk->disk->queue);
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue_nowait(q);
+	blk_mq_unfreeze_queue(q);
 
 	/* Ensure we don't receive any more interrupts */
 	virtio_reset_device(vdev);
@@ -1612,8 +1615,8 @@ static int virtblk_restore(struct virtio_device *vdev)
 		return ret;
 
 	virtio_device_ready(vdev);
+	blk_mq_unquiesce_queue(vblk->disk->queue);
 
-	blk_mq_unfreeze_queue(vblk->disk->queue);
 	return 0;
 }
 #endif
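
What the suspend fix leans on is the difference between the two blk-mq
gates: freezing waits out in-flight requests but also puts every new
submitter to sleep, while quiescing only stops queued requests from
being dispatched to the soon-to-be-deleted virtqueues. Keeping the
queue frozen for the whole suspend meant any task issuing I/O meanwhile
slept until resume. A toy model of the two flags, grossly simplified
and with all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool frozen;	/* blk_mq_freeze_queue(): submitters must wait */
static bool quiesced;	/* blk_mq_quiesce_queue(): dispatch is paused  */

static const char *submit(void)
{
	if (frozen)
		return "blocked";	/* task sleeps until unfreeze */
	if (quiesced)
		return "queued";	/* accepted, but hw not touched */
	return "dispatched";
}

int main(void)
{
	/* old suspend path: frozen stays set until resume */
	frozen = true;
	printf("old suspend, submit -> %s\n", submit());

	/* new suspend path: freeze to drain, quiesce, then unfreeze */
	quiesced = true;
	frozen = false;
	printf("new suspend, submit -> %s\n", submit());

	/* resume: virtio_device_ready(), then unquiesce */
	quiesced = false;
	printf("after resume, submit -> %s\n", submit());
	return 0;
}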

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c

@@ -1718,7 +1718,7 @@ static CLOSURE_CALLBACK(cache_set_flush)
 	if (!IS_ERR_OR_NULL(c->gc_thread))
 		kthread_stop(c->gc_thread);
 
-	if (!IS_ERR(c->root))
+	if (!IS_ERR_OR_NULL(c->root))
 		list_add(&c->root->list, &c->btree_cache);
 
 	/*
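
The one-character-looking change matters because NULL and ERR_PTR values
are different failure encodings: IS_ERR() only recognizes pointers in
the top error range, so a c->root that was still NULL (cache set torn
down before a root btree node was ever read) passed the old check and
was dereferenced by list_add(). A user-space mirror of the err.h
predicates, assuming the usual 4095-value error window:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR(long error) { return (void *)error; }
static bool IS_ERR(const void *p) { return IS_ERR_VALUE((unsigned long)p); }
static bool IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

int main(void)
{
	void *root = NULL;	/* set up failed before a root was read */

	printf("IS_ERR(NULL)         = %d\n", IS_ERR(root));		/* 0: would deref */
	printf("IS_ERR_OR_NULL(NULL) = %d\n", IS_ERR_OR_NULL(root));	/* 1: skips list_add */
	printf("IS_ERR(ERR_PTR(-5))  = %d\n", IS_ERR(ERR_PTR(-5)));	/* 1 */
	return 0;
}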

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c

@@ -2071,7 +2071,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
 	lim->physical_block_size = min(phys_bs, atomic_bs);
 	lim->io_min = phys_bs;
 	lim->io_opt = io_opt;
-	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+	if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
+	    (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
 		lim->max_write_zeroes_sectors = UINT_MAX;
 	else
 		lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
@@ -3260,8 +3261,9 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	}
 
 	if (!ctrl->maxcmd) {
-		dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
-		return -EINVAL;
+		dev_warn(ctrl->device,
+			"Firmware bug: maximum outstanding commands is 0\n");
+		ctrl->maxcmd = ctrl->sqsize + 1;
 	}
 
 	return 0;
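
Two behaviors change in core.c: DEALLOCATE_ZEROES is now honored only
when the controller advertises DSM support at all (the ONCS bit), and a
fabrics controller reporting MAXCMD == 0 (a firmware bug) gets a
fallback instead of a refused connection. The fallback's "+ 1" exists
because NVMe's SQSIZE field is zero-based. A sketch of that arithmetic,
with illustrative values rather than data from a real controller:

#include <stdio.h>

int main(void)
{
	unsigned short sqsize = 31;	/* zero-based: a 32-entry queue */
	unsigned int   maxcmd = 0;	/* buggy firmware reports zero  */

	if (!maxcmd)
		maxcmd = sqsize + 1;	/* fall back to the queue depth */

	printf("effective maxcmd = %u\n", maxcmd);	/* 32 */
	return 0;
}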

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c

@@ -2172,6 +2172,7 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
 
 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
+	unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev);
 	u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
 	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
 	u64 chunk_size;
@@ -2180,7 +2181,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	 * If there is an IOMMU that can merge pages, try a virtually
 	 * non-contiguous allocation for a single segment first.
 	 */
-	if (!(PAGE_SIZE & dma_get_merge_boundary(dev->dev))) {
+	if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) {
 		if (!nvme_alloc_host_mem_single(dev, preferred))
 			return 0;
 	}
@@ -3588,12 +3589,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_DEALLOCATE_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
-				NVME_QUIRK_DEALLOCATE_ZEROES |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN |
 				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
-		.driver_data = NVME_QUIRK_STRIPE_SIZE |
-			NVME_QUIRK_DEALLOCATE_ZEROES, },
+		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_MEDIUM_PRIO_SQ |
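
dma_get_merge_boundary() returns 0 when the DMA layer cannot merge
segments at all, and since (PAGE_SIZE & 0) == 0 the old test accepted
exactly the case it was meant to exclude, steering allocation into
nvme_alloc_host_mem_single() with no IOMMU to back it. The new test
additionally demands a nonzero boundary. A small demonstration of both
predicates, with boundary values picked for illustration:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool old_test(unsigned long boundary)
{
	return !(PAGE_SIZE & boundary);
}

static bool new_test(unsigned long boundary)
{
	return boundary && (PAGE_SIZE & boundary) == 0;
}

int main(void)
{
	/* 0: merging impossible; 4095: a 4K-granule IOMMU mask */
	unsigned long cases[] = { 0, 4095 };

	for (int i = 0; i < 2; i++)
		printf("boundary=%4lu  old=%d  new=%d\n",
		       cases[i], old_test(cases[i]), new_test(cases[i]));
	return 0;	/* boundary 0: old=1 (the bug), new=0 (fixed) */
}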

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c

@@ -1091,13 +1091,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 	}
 
 destroy_admin:
 	nvme_stop_keep_alive(&ctrl->ctrl);
-	nvme_quiesce_admin_queue(&ctrl->ctrl);
-	blk_sync_queue(ctrl->ctrl.admin_q);
-	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	if (new)
-		nvme_remove_admin_tag_set(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl);
+	nvme_rdma_teardown_admin_queue(ctrl, new);
 	return ret;
 }

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c

@@ -2101,14 +2101,6 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 	return ret;
 }
 
-static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
-{
-	nvme_tcp_stop_queue(ctrl, 0);
-	if (remove)
-		nvme_remove_admin_tag_set(ctrl);
-	nvme_tcp_free_admin_queue(ctrl);
-}
-
 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 {
 	int error;
@@ -2163,9 +2155,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 	blk_sync_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
 	nvme_cancel_admin_tagset(ctrl);
-	if (remove)
+	if (remove) {
 		nvme_unquiesce_admin_queue(ctrl);
-	nvme_tcp_destroy_admin_queue(ctrl, remove);
+		nvme_remove_admin_tag_set(ctrl);
+	}
+	nvme_tcp_free_admin_queue(ctrl);
 	if (ctrl->tls_pskid) {
 		dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
 			ctrl->tls_pskid);
@@ -2178,7 +2172,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 {
 	if (ctrl->queue_count <= 1)
 		return;
-	nvme_quiesce_admin_queue(ctrl);
 	nvme_quiesce_io_queues(ctrl);
 	nvme_sync_io_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
@@ -2278,7 +2271,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 	}
 
 destroy_admin:
 	nvme_stop_keep_alive(ctrl);
-	nvme_tcp_teardown_admin_queue(ctrl, false);
+	nvme_tcp_teardown_admin_queue(ctrl, new);
 	return ret;
 }
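
The rdma and tcp error-path changes follow one convention: an unwind
path must tear down only what the failing call itself created. The
"new" flag (true on a first connect, when the admin tag set was
allocated here) is threaded into the teardown helper instead of being
hard-coded to false, and queues are unquiesced before their tag set is
removed so outstanding requests can still complete. A generic sketch of
that shape; every name in it is hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct ctrl {
	bool have_tag_set;
	bool queue_live;
};

static int start_queue(struct ctrl *c)
{
	c->queue_live = true;
	return -1;			/* fail, for the demo */
}

static void teardown_admin_queue(struct ctrl *c, bool remove)
{
	c->queue_live = false;		/* stop queue, cancel requests */
	if (remove)			/* only if this invocation made it */
		c->have_tag_set = false;
}

static int setup_ctrl(struct ctrl *c, bool new)
{
	if (new)
		c->have_tag_set = true;	/* first connect allocates it */

	if (start_queue(c) < 0) {
		/* the bug was teardown_admin_queue(c, false) here,
		 * leaking the tag set on a failed first connect */
		teardown_admin_queue(c, new);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ctrl c = { 0 };

	setup_ctrl(&c, true);
	printf("tag set leaked: %d\n", c.have_tag_set);	/* 0 after the fix */
	return 0;
}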

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c

@@ -902,13 +902,18 @@ static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
 static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
 {
 	u16 status;
+	struct nvme_id_ns_nvm *id;
 
 	status = nvmet_req_find_ns(req);
 	if (status)
 		goto out;
 
-	status = nvmet_copy_to_sgl(req, 0, ZERO_PAGE(0),
-				   NVME_IDENTIFY_DATA_SIZE);
+	id = kzalloc(sizeof(*id), GFP_KERNEL);
+	if (!id) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 out:
 	nvmet_req_complete(req, status);
 }
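
Identify replies are fixed-size structures, and copying one out of
ZERO_PAGE(0) was only correct for as long as every field stayed zero;
a zeroed, properly typed allocation ties the reply length to the type
and gives future fields a place to be filled in. A user-space sketch
with calloc() standing in for kzalloc(); the struct layout here is
abbreviated, not the real nvme_id_ns_nvm definition:

#include <stdio.h>
#include <stdlib.h>

#define NVME_IDENTIFY_DATA_SIZE 4096

struct id_ns_nvm {			/* abbreviated stand-in */
	unsigned long long lbstm;
	unsigned char      rsvd[4088];	/* pad to 4096 bytes */
};

/* stands in for nvmet_copy_to_sgl() */
static int copy_to_host(const void *buf, size_t len)
{
	printf("copying %zu bytes\n", len);
	return 0;
}

int main(void)
{
	struct id_ns_nvm *id = calloc(1, sizeof(*id));	/* kzalloc() */

	if (!id)
		return 1;	/* NVME_SC_INTERNAL in the target code */

	/* future spec fields would be set here, e.g. id->lbstm = ...; */
	copy_to_host(id, sizeof(*id));	/* == NVME_IDENTIFY_DATA_SIZE */
	free(id);
	return 0;
}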

diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c

@@ -828,12 +828,11 @@ static void nvmet_execute_pr_report(struct nvmet_req *req)
 		goto out;
 	}
 
-	data = kmalloc(num_bytes, GFP_KERNEL);
+	data = kzalloc(num_bytes, GFP_KERNEL);
 	if (!data) {
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
-	memset(data, 0, num_bytes);
 	data->gen = cpu_to_le32(atomic_read(&pr->generation));
 	data->ptpls = 0;
 	ctrl_eds = data->regctl_eds;
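
This last hunk is the classic Coccinelle rewrite: kmalloc() followed by
memset(..., 0, ...) is exactly kzalloc(), which also removes the window
in which the buffer holds stale heap bytes and makes sure implicit
padding is cleared before the structure is copied out to the host. A
user-space equivalent; the report struct is a made-up stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct report {			/* made-up stand-in */
	unsigned int  gen;
	unsigned char ptpls;
	unsigned char regctl_eds[];
};

int main(void)
{
	size_t num_bytes = sizeof(struct report) + 16;

	/* before: allocate, then zero in a second step */
	struct report *a = malloc(num_bytes);
	if (!a)
		return 1;
	memset(a, 0, num_bytes);

	/* after: one call allocates and zeroes (kzalloc in the kernel) */
	struct report *b = calloc(1, num_bytes);
	if (!b) {
		free(a);
		return 1;
	}

	printf("a->gen=%u b->gen=%u\n", a->gen, b->gen);	/* 0 0 */
	free(a);
	free(b);
	return 0;
}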