Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-07 13:43:51 +00:00)
block-6.3-2023-04-06
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmQvgy4QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpgSDEACfbyn/e4JYSeG+NDhQz2Vk+RhQwiAlwx56
DbtgyCHHIldIsS7RDtvQAUwVGkKPfxmg42Ar9s//OQarDZF/JRl5kBInKW/cGdKD
AIPiuR8OFjQZdeRsnPJ1U+cPUf5pqvpDGCjkopXXLcfdNHxAQwW/XyUh3Ibrrh7i
GUrgc9W2zXrGF/WkB/a4sxdqrADq/rWYWmvRHMjGEQYd6+9+kY1m++0aP26ZoWj1
U+w9ZN9BJJxodAyaz37dCPKgrGnFGzLP6GkgOsP6SIAtLELw/2j65g1PXU1TZWWy
4GHr1k/7RWnNvzYoqYLgv3q5sXuC340XMrgtaDxKCri22w5owVepaSjBgA6FCGPC
gEekR4yuzHEQ1dXGrNM95BD2v9hCQd7OdZjdNX4ZziaYFMscAX42Gg3Mh0INk9Wa
HxS3Jx4pL3V9phGJeIp9ZebjBjaOQMllmnhg8otkVY3TwxYixE2XfIAhsNLcNhjK
1G/GjHbLkRYz4NetP8RDFuLrbNPBwPX3eYunGmuPx8DT+Uf2q/OfJJ5+W15xqWp1
6OK5ZxTuExQx9jrlA4fO7UioJeun426SGMwxDCtaNanXLnvvo0u+mX18tljHwGQz
hGcQ0cam6O2jVQ1e+XZW5QJpG5fAG7O/AM5smmmzJ25RHylA937Gee39IDizIBRs
5TjTnRAqDw==
=YMIj
-----END PGP SIGNATURE-----

Merge tag 'block-6.3-2023-04-06' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Ensure that ublk always reads the whole sqe upfront (me)

 - Fix for a block size probing issue with ublk (Ming)

 - Fix for the bio based polling (Keith)

 - NVMe pull request via Christoph:
     - fix discard support without oncs (Keith Busch)

 - Partition scan error handling regression fix (Yu)

* tag 'block-6.3-2023-04-06' of git://git.kernel.dk/linux:
  block: don't set GD_NEED_PART_SCAN if scan partition failed
  block: ublk: make sure that block size is set correctly
  ublk: read any SQE values upfront
  nvme: fix discard support without oncs
  blk-mq: directly poll requests
commit da0af3c559
block/blk-mq.c:

@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
 		return false;
 	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
 		return false;
-	if (WARN_ON_ONCE(!rq->bio))
-		return false;
 	return true;
 }
 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
 	do {
-		bio_poll(rq->bio, NULL, 0);
+		blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
 		cond_resched();
 	} while (!completion_done(wait));
 }
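The blk-mq fix polls the request's hardware queue directly instead of going through rq->bio, which a bio-based driver may already have completed and freed. As a rough userspace analogue of the poll-until-done loop (a minimal sketch; toy_queue, toy_poll, and poll_for_completion are hypothetical stand-ins, not kernel APIs):

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy "hardware queue": each poll reaps one pending completion and sets
 * the done flag once nothing is left, loosely mimicking blk_mq_poll(). */
struct toy_queue {
	int pending;
	atomic_bool done;
};

static void toy_poll(struct toy_queue *q)
{
	if (q->pending && --q->pending == 0)
		atomic_store(&q->done, true);
}

/* Poll the queue itself until the request completes. Polling the queue,
 * not a per-I/O object (the analogue of rq->bio, which may already be
 * gone), is the point of the fix above. */
static void poll_for_completion(struct toy_queue *q)
{
	do {
		toy_poll(q);
		sched_yield();	/* userspace stand-in for cond_resched() */
	} while (!atomic_load(&q->done));
}

int main(void)
{
	struct toy_queue q = { .pending = 3 };
	poll_for_completion(&q);
	puts("request completed");
	return 0;
}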
block/genhd.c:

@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
 	if (disk->open_partitions)
 		return -EBUSY;
 
-	set_bit(GD_NEED_PART_SCAN, &disk->state);
 	/*
 	 * If the device is opened exclusively by current thread already, it's
 	 * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
 			return ret;
 	}
 
+	set_bit(GD_NEED_PART_SCAN, &disk->state);
 	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
 	if (IS_ERR(bdev))
 		ret = PTR_ERR(bdev);
 	else
 		blkdev_put(bdev, mode & ~FMODE_EXCL);
 
+	/*
+	 * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
+	 * and this will cause that re-assemble partitioned raid device will
+	 * creat partition for underlying disk.
+	 */
+	clear_bit(GD_NEED_PART_SCAN, &disk->state);
 	if (!(mode & FMODE_EXCL))
 		bd_abort_claiming(disk->part0, disk_scan_partitions);
 	return ret;
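The genhd.c change arms GD_NEED_PART_SCAN only right before the open that consumes it, and always clears it on the way out, so a failed scan cannot leave a stale flag that triggers a partition scan on a later, unrelated open. A minimal sketch of the same arm/disarm pattern (toy_disk, toy_open, and toy_scan are hypothetical; C11 atomics stand in for set_bit/clear_bit):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device state: NEED_SCAN mimics GD_NEED_PART_SCAN, a
 * one-shot flag consumed by the next open of the device. */
struct toy_disk {
	atomic_uint state;
};
#define NEED_SCAN 1u

/* A fallible open that would normally consume NEED_SCAN. */
static int toy_open(struct toy_disk *d, bool fail_early)
{
	if (fail_early)
		return -1;	/* flag never consumed on this path */
	atomic_fetch_and(&d->state, ~NEED_SCAN);
	return 0;
}

static int toy_scan(struct toy_disk *d, bool fail_early)
{
	int ret;

	/* Arm the flag only right before the operation that consumes it... */
	atomic_fetch_or(&d->state, NEED_SCAN);
	ret = toy_open(d, fail_early);
	/* ...and always disarm it on exit, so an early failure cannot leave
	 * a stale flag behind to trigger a scan on an unrelated open. */
	atomic_fetch_and(&d->state, ~NEED_SCAN);
	return ret;
}

int main(void)
{
	struct toy_disk d = { 0 };
	toy_scan(&d, true);
	printf("stale flag left behind: %s\n",
	       (atomic_load(&d.state) & NEED_SCAN) ? "yes" : "no");
	return 0;
}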
drivers/block/ublk_drv.c:

@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
 	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
 		const struct ublk_param_basic *p = &ub->params.basic;
 
-		if (p->logical_bs_shift > PAGE_SHIFT)
+		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
 			return -EINVAL;
 
 		if (p->logical_bs_shift > p->physical_bs_shift)
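The tightened bound rejects logical block sizes below 512 bytes (shift 9) as well as above the page size, so a malformed ublk parameter set can no longer produce an unusable block size. A minimal sketch of the same check (validate_bs_shift is hypothetical, and a 4 KiB page, PAGE_SHIFT == 12, is assumed for the example):

#include <errno.h>
#include <stdio.h>

/* Assumed page size for the example: 4 KiB, i.e. shift 12. */
#define TOY_PAGE_SHIFT 12

/* Mirror of the ublk bound: a logical block size must be at least
 * 512 bytes (1 << 9) and at most the page size. */
static int validate_bs_shift(unsigned int shift)
{
	if (shift > TOY_PAGE_SHIFT || shift < 9)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* 8 -> 256 bytes: too small; 9..12 -> 512..4096: ok; 13: too big. */
	for (unsigned int s = 8; s <= 13; s++)
		printf("shift %u (%u bytes): %s\n", s, 1u << s,
		       validate_bs_shift(s) ? "rejected" : "ok");
	return 0;
}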
drivers/block/ublk_drv.c (continued):

@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
 	ublk_queue_cmd(ubq, req);
 }
 
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+			       unsigned int issue_flags,
+			       struct ublksrv_io_cmd *ub_cmd)
 {
-	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
 	struct ublk_device *ub = cmd->file->private_data;
 	struct ublk_queue *ubq;
 	struct ublk_io *io;
@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 	return -EIOCBQUEUED;
 }
 
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+	struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
+	struct ublksrv_io_cmd ub_cmd;
+
+	/*
+	 * Not necessary for async retry, but let's keep it simple and always
+	 * copy the values to avoid any potential reuse.
+	 */
+	ub_cmd.q_id = READ_ONCE(ub_src->q_id);
+	ub_cmd.tag = READ_ONCE(ub_src->tag);
+	ub_cmd.result = READ_ONCE(ub_src->result);
+	ub_cmd.addr = READ_ONCE(ub_src->addr);
+
+	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
 static const struct file_operations ublk_ch_fops = {
 	.owner = THIS_MODULE,
 	.open = ublk_ch_open,
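The new wrapper snapshots every SQE field exactly once with READ_ONCE before anything uses it, so userspace cannot change a value between its validation and its use (a classic double-fetch). A minimal userspace sketch of the same copy-upfront pattern (shared_cmd, LOAD_ONCE, and handle_cmd are hypothetical; a volatile load stands in for READ_ONCE and assumes GCC/Clang __typeof__):

#include <stdint.h>
#include <stdio.h>

/* Command block another thread or process can scribble on, e.g. a
 * shared-memory submission ring. */
struct shared_cmd {
	uint16_t q_id;
	uint16_t tag;
	int32_t  result;
	uint64_t addr;
};

/* Hypothetical READ_ONCE stand-in: force a single load, no re-fetch. */
#define LOAD_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static int handle_cmd(const struct shared_cmd *src)
{
	struct shared_cmd cmd;

	/* Snapshot every field exactly once, then use only the copy; a
	 * concurrent writer can no longer change a value between its
	 * validation and its use. */
	cmd.q_id   = LOAD_ONCE(src->q_id);
	cmd.tag    = LOAD_ONCE(src->tag);
	cmd.result = LOAD_ONCE(src->result);
	cmd.addr   = LOAD_ONCE(src->addr);

	if (cmd.q_id > 64)	/* validate the copy... */
		return -1;
	/* ...and dispatch from the copy. */
	printf("dispatch q=%u tag=%u\n", (unsigned)cmd.q_id, (unsigned)cmd.tag);
	return 0;
}

int main(void)
{
	struct shared_cmd c = { .q_id = 1, .tag = 7 };
	return handle_cmd(&c);
}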
drivers/block/ublk_drv.c (continued):

@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
 		/* clear all we don't support yet */
 		ub->params.types &= UBLK_PARAM_TYPE_ALL;
 		ret = ublk_validate_params(ub);
+		if (ret)
+			ub->params.types = 0;
 	}
 	mutex_unlock(&ub->mutex);
 
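Zeroing ub->params.types on a failed validation means a later start-device step sees "no parameters" rather than a half-validated set. A minimal sketch of that reject-leaves-nothing-behind pattern (toy_params, toy_validate, and toy_set_params are hypothetical names for illustration):

#include <stdio.h>

/* Hypothetical parameter set; types == 0 means "nothing configured". */
struct toy_params {
	unsigned int types;	/* bitmask of configured parameter groups */
	unsigned int bs_shift;
};

static int toy_validate(const struct toy_params *p)
{
	return (p->bs_shift >= 9 && p->bs_shift <= 12) ? 0 : -1;
}

/* Either the whole set is accepted, or types is zeroed so later consumers
 * see "no parameters" instead of a half-validated mix. */
static int toy_set_params(struct toy_params *cur, struct toy_params next)
{
	int ret;

	*cur = next;
	ret = toy_validate(cur);
	if (ret)
		cur->types = 0;	/* reject: leave nothing behind */
	return ret;
}

int main(void)
{
	struct toy_params cur = { 0 };
	toy_set_params(&cur, (struct toy_params){ .types = 1, .bs_shift = 20 });
	printf("types after rejected set: %u\n", cur.types);
	return 0;
}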
drivers/nvme/host/core.c:

@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 	struct request_queue *queue = disk->queue;
 	u32 size = queue_logical_block_size(queue);
 
+	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
 	if (ctrl->max_discard_sectors == 0) {
 		blk_queue_max_discard_sectors(queue, 0);
 		return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 	if (queue->limits.max_discard_sectors)
 		return;
 
-	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
-		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
 	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
 	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
 
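Moving the DMRSL-derived limit above the zero-check means a controller that reports DMRSL but lacks DSM support in ONCS (so max_discard_sectors starts at 0) still gets a discard limit instead of having discard switched off. A minimal sketch of that derive-before-check ordering (toy_ctrl and discard_limit are hypothetical, not the nvme driver's API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical controller capabilities: dmrsl is an optional size hint;
 * dsm_supported mirrors the ONCS DSM bit. */
struct toy_ctrl {
	uint32_t dmrsl;		/* 0 if not reported */
	int dsm_supported;
	uint32_t max_discard_sectors;
};

static uint32_t discard_limit(struct toy_ctrl *ctrl)
{
	/* Limits from the DSM bit... */
	if (ctrl->dsm_supported && !ctrl->max_discard_sectors)
		ctrl->max_discard_sectors = UINT32_MAX;
	/* ...and from the DMRSL hint, derived *before* the zero-check below,
	 * so a DMRSL-only controller is not wrongly treated as "no discard". */
	if (ctrl->dmrsl)
		ctrl->max_discard_sectors = ctrl->dmrsl;

	if (ctrl->max_discard_sectors == 0)
		return 0;	/* genuinely no discard support */
	return ctrl->max_discard_sectors;
}

int main(void)
{
	/* No DSM bit, but a DMRSL hint: checking before deriving would
	 * wrongly disable discard. */
	struct toy_ctrl c = { .dmrsl = 2048, .dsm_supported = 0 };
	printf("discard limit: %u sectors\n", discard_limit(&c));
	return 0;
}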