Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Synced: 2025-01-15 12:07:46 +00:00
Branch: next-20250110/block
Commit: 793960099d
@@ -865,7 +865,6 @@ static int ubd_add(int n, char **error_out)
ubd_dev->tag_set.ops = &ubd_mq_ops;
ubd_dev->tag_set.queue_depth = 64;
ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ubd_dev->tag_set.driver_data = ubd_dev;
ubd_dev->tag_set.nr_hw_queues = 1;
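Note: this series drops BLK_MQ_F_SHOULD_MERGE everywhere, since blk-mq-sched now always attempts the per-sw-queue merge (see the blk-mq-sched.c hunk further down). A hedged sketch of what a driver's tag_set setup looks like after the change; the field values are taken from the hunk above, the helper function name is illustrative only:

/* Sketch only: tag_set init once BLK_MQ_F_SHOULD_MERGE is gone. */
static int ubd_init_tag_set(struct ubd *ubd_dev)
{
	ubd_dev->tag_set.ops = &ubd_mq_ops;
	ubd_dev->tag_set.queue_depth = 64;
	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
	/* no .flags assignment needed: bio merging is now unconditional */
	ubd_dev->tag_set.driver_data = ubd_dev;
	ubd_dev->tag_set.nr_hw_queues = 1;

	return blk_mq_alloc_tag_set(&ubd_dev->tag_set);
}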
@@ -27,8 +27,6 @@ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o

obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
@@ -7622,7 +7622,7 @@ static ssize_t bfq_low_latency_store(struct elevator_queue *e,
#define BFQ_ATTR(name) \
__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)

static struct elv_fs_entry bfq_attrs[] = {
static const struct elv_fs_entry bfq_attrs[] = {
BFQ_ATTR(fifo_expire_sync),
BFQ_ATTR(fifo_expire_async),
BFQ_ATTR(back_seek_max),
@@ -118,17 +118,18 @@ static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,

static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
unsigned short nr_vecs = bip->bip_max_vcnt - 1;
struct bio_vec *copy = &bip->bip_vec[1];
size_t bytes = bip->bip_iter.bi_size;
struct iov_iter iter;
unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
struct bio_vec *orig_bvecs = &bip->bip_vec[1];
struct bio_vec *bounce_bvec = &bip->bip_vec[0];
size_t bytes = bounce_bvec->bv_len;
struct iov_iter orig_iter;
int ret;

iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
WARN_ON_ONCE(ret != bytes);

bio_integrity_unpin_bvec(copy, nr_vecs, true);
bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
}

/**
@@ -301,16 +302,15 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
return nr_bvecs;
}

int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
size_t offset, bytes = iter->count;
unsigned int direction, nr_bvecs;
struct iov_iter iter;
int ret, nr_vecs;
size_t offset;
bool copy;

if (bio_integrity(bio))
@@ -323,8 +323,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
else
direction = ITER_SOURCE;

iov_iter_ubuf(&iter, direction, ubuf, bytes);
nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
if (nr_vecs > BIO_MAX_VECS)
return -E2BIG;
if (nr_vecs > UIO_FASTIOV) {
@@ -334,8 +333,8 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
pages = NULL;
}

copy = !iov_iter_is_aligned(&iter, align, align);
ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
copy = !iov_iter_is_aligned(iter, align, align);
ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs, 0, &offset);
if (unlikely(ret < 0))
goto free_bvec;

@@ -365,6 +364,55 @@ free_bvec:
return ret;
}

static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
{
struct bio_integrity_payload *bip = bio_integrity(bio);

if (meta->flags & IO_INTEGRITY_CHK_GUARD)
bip->bip_flags |= BIP_CHECK_GUARD;
if (meta->flags & IO_INTEGRITY_CHK_APPTAG)
bip->bip_flags |= BIP_CHECK_APPTAG;
if (meta->flags & IO_INTEGRITY_CHK_REFTAG)
bip->bip_flags |= BIP_CHECK_REFTAG;

bip->app_tag = meta->app_tag;
}

int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned int integrity_bytes;
int ret;
struct iov_iter it;

if (!bi)
return -EINVAL;
/*
* original meta iterator can be bigger.
* process integrity info corresponding to current data buffer only.
*/
it = meta->iter;
integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
if (it.count < integrity_bytes)
return -EINVAL;

/* should fit into two bytes */
BUILD_BUG_ON(IO_INTEGRITY_VALID_FLAGS >= (1 << 16));

if (meta->flags && (meta->flags & ~IO_INTEGRITY_VALID_FLAGS))
return -EINVAL;

it.count = integrity_bytes;
ret = bio_integrity_map_user(bio, &it);
if (!ret) {
bio_uio_meta_to_bip(bio, meta);
bip_set_seed(bio_integrity(bio), meta->seed);
iov_iter_advance(&meta->iter, integrity_bytes);
meta->seed += bio_integrity_intervals(bi, bio_sectors(bio));
}
return ret;
}

/**
* bio_integrity_prep - Prepare bio for integrity I/O
* @bio: bio to prepare
@@ -435,6 +483,11 @@ bool bio_integrity_prep(struct bio *bio)
if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bip->bip_flags |= BIP_IP_CHECKSUM;

/* describe what tags to check in payload */
if (bi->csum_type)
bip->bip_flags |= BIP_CHECK_GUARD;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
bip->bip_flags |= BIP_CHECK_REFTAG;
if (bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf)) < len) {
printk(KERN_ERR "could not attach integrity payload\n");
@@ -559,7 +612,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,

bip->bip_vec = bip_src->bip_vec;
bip->bip_iter = bip_src->bip_iter;
bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
bip->bip_flags = bip_src->bip_flags & BIP_CLONE_FLAGS;
bip->app_tag = bip_src->app_tag;

return 0;
}
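For context, a hedged sketch of how a caller might drive the bio_integrity_map_iter() interface added above. The struct uio_meta field names (flags, app_tag, seed, iter) and the IO_INTEGRITY_CHK_* flags come from this hunk; the wrapper function, buffer arguments, and flag choice are illustrative assumptions, not part of the commit:

/* Illustrative only: attach user-provided PI metadata to a read bio. */
static int prepare_pi_read(struct bio *bio, void __user *meta_buf,
			   size_t meta_len, u64 start_seed)
{
	struct uio_meta meta = {
		.flags   = IO_INTEGRITY_CHK_GUARD | IO_INTEGRITY_CHK_REFTAG,
		.app_tag = 0,
		.seed    = start_seed,
	};

	/* read: integrity buffer is a destination, as in blk_rq_integrity_map_user() */
	iov_iter_ubuf(&meta.iter, ITER_DEST, meta_buf, meta_len);
	return bio_integrity_map_iter(bio, &meta);
}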
block/bio.c (107 lines changed)
@@ -946,8 +946,11 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,

/*
* Try to merge a page into a segment, while obeying the hardware segment
* size limit. This is not for normal read/write bios, but for passthrough
* or Zone Append operations that we can't split.
* size limit.
*
* This is kept around for the integrity metadata, which is still tries
* to build the initial bio to the hardware limit and doesn't have proper
* helpers to split. Hopefully this will go away soon.
*/
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
struct page *page, unsigned len, unsigned offset,
@@ -964,106 +967,6 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
return bvec_try_merge_page(bv, page, len, offset, same_page);
}

/**
* bio_add_hw_page - attempt to add a page to a bio with hw constraints
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
* @max_sectors: maximum number of sectors that can be added
* @same_page: return if the segment has been merged inside the same page
*
* Add a page to a bio while respecting the hardware max_sectors, max_segment
* and gap limitations.
*/
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page)
{
unsigned int max_size = max_sectors << SECTOR_SHIFT;

if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return 0;

len = min3(len, max_size, queue_max_segment_size(q));
if (len > max_size - bio->bi_iter.bi_size)
return 0;

if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

if (bvec_try_merge_hw_page(q, bv, page, len, offset,
same_page)) {
bio->bi_iter.bi_size += len;
return len;
}

if (bio->bi_vcnt >=
min(bio->bi_max_vecs, queue_max_segments(q)))
return 0;

/*
* If the queue doesn't support SG gaps and adding this segment
* would create a gap, disallow it.
*/
if (bvec_gap_to_prev(&q->limits, bv, offset))
return 0;
}

bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
bio->bi_vcnt++;
bio->bi_iter.bi_size += len;
return len;
}

/**
* bio_add_hw_folio - attempt to add a folio to a bio with hw constraints
* @q: the target queue
* @bio: destination bio
* @folio: folio to add
* @len: vec entry length
* @offset: vec entry offset in the folio
* @max_sectors: maximum number of sectors that can be added
* @same_page: return if the segment has been merged inside the same folio
*
* Add a folio to a bio while respecting the hardware max_sectors, max_segment
* and gap limitations.
*/
int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
struct folio *folio, size_t len, size_t offset,
unsigned int max_sectors, bool *same_page)
{
if (len > UINT_MAX || offset > UINT_MAX)
return 0;
return bio_add_hw_page(q, bio, folio_page(folio, 0), len, offset,
max_sectors, same_page);
}

/**
* bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
* limitations. The target block device must allow bio's up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
* This should only be used by passthrough bios.
*/
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset)
{
bool same_page = false;
return bio_add_hw_page(q, bio, page, len, offset,
queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
* __bio_add_page - add page(s) to a bio in a new segment
* @bio: destination bio
@@ -115,8 +115,16 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
ssize_t bytes)
{
int ret = bio_integrity_map_user(rq->bio, ubuf, bytes);
int ret;
struct iov_iter iter;
unsigned int direction;

if (op_is_write(req_op(rq)))
direction = ITER_DEST;
else
direction = ITER_SOURCE;
iov_iter_ubuf(&iter, direction, ubuf, bytes);
ret = bio_integrity_map_user(rq->bio, &iter);
if (ret)
return ret;
block/blk-map.c (126 lines changed)
@@ -189,7 +189,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
}
}

if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
if (bio_add_page(bio, page, bytes, offset) < bytes) {
if (!map_data)
__free_page(page);
break;
@@ -272,86 +272,27 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
gfp_t gfp_mask)
{
iov_iter_extraction_t extraction_flags = 0;
unsigned int max_sectors = queue_max_hw_sectors(rq->q);
unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
struct bio *bio;
int ret;
int j;

if (!iov_iter_count(iter))
return -EINVAL;

bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
if (bio == NULL)
if (!bio)
return -ENOMEM;

if (blk_queue_pci_p2pdma(rq->q))
extraction_flags |= ITER_ALLOW_P2PDMA;
if (iov_iter_extract_will_pin(iter))
bio_set_flag(bio, BIO_PAGE_PINNED);

while (iov_iter_count(iter)) {
struct page *stack_pages[UIO_FASTIOV];
struct page **pages = stack_pages;
ssize_t bytes;
size_t offs;
int npages;

if (nr_vecs > ARRAY_SIZE(stack_pages))
pages = NULL;

bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
nr_vecs, extraction_flags, &offs);
if (unlikely(bytes <= 0)) {
ret = bytes ? bytes : -EFAULT;
goto out_unmap;
}

npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

if (unlikely(offs & queue_dma_alignment(rq->q)))
j = 0;
else {
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
bool same_page = false;

if (n > bytes)
n = bytes;

if (!bio_add_hw_page(rq->q, bio, page, n, offs,
max_sectors, &same_page))
break;

if (same_page)
bio_release_page(bio, page);
bytes -= n;
offs = 0;
}
}
/*
* release the pages we didn't map into the bio, if any
*/
while (j < npages)
bio_release_page(bio, pages[j++]);
if (pages != stack_pages)
kvfree(pages);
/* couldn't stuff something into bio? */
if (bytes) {
iov_iter_revert(iter, bytes);
break;
}
}

ret = bio_iov_iter_get_pages(bio, iter);
if (ret)
goto out_put;
ret = blk_rq_append_bio(rq, bio);
if (ret)
goto out_unmap;
goto out_release;
return 0;

out_unmap:
out_release:
bio_release_pages(bio, false);
out_put:
blk_mq_map_bio_put(bio);
return ret;
}
@@ -422,8 +363,7 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
page = virt_to_page(data);
else
page = vmalloc_to_page(data);
if (bio_add_pc_page(q, bio, page, bytes,
offset) < bytes) {
if (bio_add_page(bio, page, bytes, offset) < bytes) {
/* we don't support partial mappings */
bio_uninit(bio);
kfree(bio);
@@ -507,7 +447,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (!reading)
memcpy(page_address(page), p, bytes);

if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
if (bio_add_page(bio, page, bytes, 0) < bytes)
break;

len -= bytes;
@@ -536,24 +476,33 @@ cleanup:
*/
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
struct bvec_iter iter;
struct bio_vec bv;
const struct queue_limits *lim = &rq->q->limits;
unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
unsigned int nr_segs = 0;
int ret;

bio_for_each_bvec(bv, bio, iter)
nr_segs++;
/* check that the data layout matches the hardware restrictions */
ret = bio_split_rw_at(bio, lim, &nr_segs, max_bytes);
if (ret) {
/* if we would have to split the bio, copy instead */
if (ret > 0)
ret = -EREMOTEIO;
return ret;
}

if (!rq->bio) {
blk_rq_bio_prep(rq, bio, nr_segs);
} else {
if (rq->bio) {
if (!ll_back_merge_fn(rq, bio, nr_segs))
return -EINVAL;
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->__data_len += (bio)->bi_iter.bi_size;
rq->__data_len += bio->bi_iter.bi_size;
bio_crypt_free_ctx(bio);
return 0;
}

rq->nr_phys_segments = nr_segs;
rq->bio = rq->biotail = bio;
rq->__data_len = bio->bi_iter.bi_size;
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
@@ -561,9 +510,7 @@ EXPORT_SYMBOL(blk_rq_append_bio);
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
const struct queue_limits *lim = &rq->q->limits;
unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
unsigned int nsegs;
unsigned int max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT;
struct bio *bio;
int ret;

@@ -576,18 +523,10 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
return -ENOMEM;
bio_iov_bvec_set(bio, iter);

/* check that the data layout matches the hardware restrictions */
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
if (ret) {
/* if we would have to split the bio, copy instead */
if (ret > 0)
ret = -EREMOTEIO;
ret = blk_rq_append_bio(rq, bio);
if (ret)
blk_mq_map_bio_put(bio);
return ret;
}

blk_rq_bio_prep(rq, bio, nsegs);
return 0;
}

/**
@@ -644,8 +583,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
else
ret = bio_map_user_iov(rq, &i, gfp_mask);
if (ret)
if (ret) {
if (ret == -EREMOTEIO)
ret = -EINVAL;
goto unmap_rq;
}
if (!bio)
bio = rq->bio;
} while (iov_iter_count(&i));
@@ -473,6 +473,63 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
return nr_phys_segs;
}

struct phys_vec {
phys_addr_t paddr;
u32 len;
};

static bool blk_map_iter_next(struct request *req,
struct req_iterator *iter, struct phys_vec *vec)
{
unsigned int max_size;
struct bio_vec bv;

if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
if (!iter->bio)
return false;
vec->paddr = bvec_phys(&req->special_vec);
vec->len = req->special_vec.bv_len;
iter->bio = NULL;
return true;
}

if (!iter->iter.bi_size)
return false;

bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
vec->paddr = bvec_phys(&bv);
max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
bv.bv_len = min(bv.bv_len, max_size);
bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);

/*
* If we are entirely done with this bi_io_vec entry, check if the next
* one could be merged into it. This typically happens when moving to
* the next bio, but some callers also don't pack bvecs tight.
*/
while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
struct bio_vec next;

if (!iter->iter.bi_size) {
if (!iter->bio->bi_next)
break;
iter->bio = iter->bio->bi_next;
iter->iter = iter->bio->bi_iter;
}

next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
if (bv.bv_len + next.bv_len > max_size ||
!biovec_phys_mergeable(req->q, &bv, &next))
break;

bv.bv_len += next.bv_len;
bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
}

vec->len = bv.bv_len;
return true;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
struct scatterlist *sglist)
{
@@ -490,120 +547,26 @@ static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
struct bio_vec *bvec, struct scatterlist *sglist,
struct scatterlist **sg)
{
unsigned nbytes = bvec->bv_len;
unsigned nsegs = 0, total = 0;

while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
unsigned len = get_max_segment_size(&q->limits,
bvec_phys(bvec) + total, nbytes);
struct page *page = bvec->bv_page;

/*
* Unfortunately a fair number of drivers barf on scatterlists
* that have an offset larger than PAGE_SIZE, despite other
* subsystems dealing with that invariant just fine. For now
* stick to the legacy format where we never present those from
* the block layer, but the code below should be removed once
* these offenders (mostly MMC/SD drivers) are fixed.
*/
page += (offset >> PAGE_SHIFT);
offset &= ~PAGE_MASK;

*sg = blk_next_sg(sg, sglist);
sg_set_page(*sg, page, len, offset);

total += len;
nbytes -= len;
nsegs++;
}

return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
struct scatterlist *sglist, struct scatterlist **sg)
{
*sg = blk_next_sg(sg, sglist);
sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
struct bio_vec *bvprv, struct scatterlist **sg)
{

int nbytes = bvec->bv_len;

if (!*sg)
return false;

if ((*sg)->length + nbytes > queue_max_segment_size(q))
return false;

if (!biovec_phys_mergeable(q, bvprv, bvec))
return false;

(*sg)->length += nbytes;

return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
struct bvec_iter iter;
int nsegs = 0;
bool new_bio = false;

for_each_bio(bio) {
bio_for_each_bvec(bvec, bio, iter) {
/*
* Only try to merge bvecs from two bios given we
* have done bio internal merge when adding pages
* to bio
*/
if (new_bio &&
__blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
goto next_bvec;

if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
else
nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
next_bvec:
new_bio = false;
}
if (likely(bio->bi_iter.bi_size)) {
bvprv = bvec;
new_bio = true;
}
}

return nsegs;
}

/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
* Map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries.
*/
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist, struct scatterlist **last_sg)
{
struct req_iterator iter = {
.bio = rq->bio,
.iter = rq->bio->bi_iter,
};
struct phys_vec vec;
int nsegs = 0;

if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
while (blk_map_iter_next(rq, &iter, &vec)) {
*last_sg = blk_next_sg(last_sg, sglist);
sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
offset_in_page(vec.paddr));
nsegs++;
}

if (*last_sg)
sg_mark_end(*last_sg);
@@ -11,6 +11,7 @@
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>

#include "blk.h"
#include "blk-mq.h"
@@ -54,3 +55,39 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)

return NUMA_NO_NODE;
}

/**
* blk_mq_map_hw_queues - Create CPU to hardware queue mapping
* @qmap: CPU to hardware queue map
* @dev: The device to map queues
* @offset: Queue offset to use for the device
*
* Create a CPU to hardware queue mapping in @qmap. The struct bus_type
* irq_get_affinity callback will be used to retrieve the affinity.
*/
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
struct device *dev, unsigned int offset)

{
const struct cpumask *mask;
unsigned int queue, cpu;

if (!dev->bus->irq_get_affinity)
goto fallback;

for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = dev->bus->irq_get_affinity(dev, queue + offset);
if (!mask)
goto fallback;

for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}

return;

fallback:
WARN_ON_ONCE(qmap->nr_queues > 1);
blk_mq_clear_mq_map(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
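The new blk_mq_map_hw_queues() helper above replaces the PCI- and virtio-specific mapping helpers that this commit deletes further down. A hedged sketch of what a driver-side ->map_queues callback looks like after the conversion; the driver name and the ctrl/pdev fields are illustrative assumptions, only the two library calls are taken from this diff:

/* Sketch: former blk_mq_pci_map_queues() user converted to the bus helper. */
static void foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_ctrl *ctrl = set->driver_data;	/* hypothetical driver data */

	/* old: blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], ctrl->pdev, 0); */
	blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
			     &ctrl->pdev->dev, 0);
}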
@@ -172,21 +172,13 @@ static int hctx_state_show(void *data, struct seq_file *m)
return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
BLK_TAG_ALLOC_NAME(FIFO),
BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(SHOULD_MERGE),
HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
HCTX_FLAG_NAME(STACKING),
HCTX_FLAG_NAME(TAG_HCTX_SHARED),
HCTX_FLAG_NAME(BLOCKING),
HCTX_FLAG_NAME(NO_SCHED),
HCTX_FLAG_NAME(TAG_RR),
HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME
@@ -194,22 +186,11 @@ static const char *const hctx_flag_name[] = {
static int hctx_flags_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
BLK_MQ_F_ALLOC_POLICY_START_BIT);
BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);
BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != ilog2(BLK_MQ_F_MAX));

seq_puts(m, "alloc_policy=");
if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
alloc_policy_name[alloc_policy])
seq_puts(m, alloc_policy_name[alloc_policy]);
else
seq_printf(m, "%d", alloc_policy);
seq_puts(m, " ");
blk_flags_show(m,
hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
blk_flags_show(m, hctx->flags, hctx_flag_name,
ARRAY_SIZE(hctx_flag_name));
seq_puts(m, "\n");
return 0;
}
@@ -1,46 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
*/
#include <linux/kobject.h>
#include <linux/blkdev.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>
#include <linux/module.h>

#include "blk-mq.h"

/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
* @qmap: CPU to hardware queue map.
* @pdev: PCI device associated with @set.
* @offset: Offset to use for the pci irq vector
*
* This function assumes the PCI device @pdev has at least as many available
* interrupt vectors as @set has queues. It will then query the vector
* corresponding to each queue for it's affinity mask and built queue mapping
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset)
{
const struct cpumask *mask;
unsigned int queue, cpu;

for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue + offset);
if (!mask)
goto fallback;

for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}

return;

fallback:
WARN_ON_ONCE(qmap->nr_queues > 1);
blk_mq_clear_mq_map(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
@@ -351,8 +351,7 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
type = hctx->type;
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
list_empty_careful(&ctx->rq_lists[type]))
if (list_empty_careful(&ctx->rq_lists[type]))
goto out_put;

/* default per sw-queue merge */
@@ -544,30 +544,11 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
struct sbitmap_queue *breserved_tags,
unsigned int queue_depth, unsigned int reserved,
int node, int alloc_policy)
{
unsigned int depth = queue_depth - reserved;
bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

if (bt_alloc(bitmap_tags, depth, round_robin, node))
return -ENOMEM;
if (bt_alloc(breserved_tags, reserved, round_robin, node))
goto free_bitmap_tags;

return 0;

free_bitmap_tags:
sbitmap_queue_free(bitmap_tags);
return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
unsigned int reserved_tags,
int node, int alloc_policy)
unsigned int reserved_tags, unsigned int flags, int node)
{
unsigned int depth = total_tags - reserved_tags;
bool round_robin = flags & BLK_MQ_F_TAG_RR;
struct blk_mq_tags *tags;

if (total_tags > BLK_MQ_TAG_MAX) {
@@ -582,14 +563,18 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
spin_lock_init(&tags->lock);
if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
goto out_free_tags;
if (bt_alloc(&tags->breserved_tags, reserved_tags, round_robin, node))
goto out_free_bitmap_tags;

if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
total_tags, reserved_tags, node,
alloc_policy) < 0) {
return tags;

out_free_bitmap_tags:
sbitmap_queue_free(&tags->bitmap_tags);
out_free_tags:
kfree(tags);
return NULL;
}
return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
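With the allocation policy folded into the tag set flags above, round-robin tag allocation is now requested with BLK_MQ_F_TAG_RR rather than a separate BLK_TAG_ALLOC_RR policy (SCSI/ATA hosts set .tag_alloc_policy_rr instead, as the ATA hunks later in this commit show). A hedged sketch of a direct blk-mq user; the tag_set itself and its values are illustrative, only the flag name comes from this diff:

/* Sketch: requesting round-robin tag allocation via the new flag. */
static struct blk_mq_tag_set example_tag_set = {
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_TAG_RR,	/* was: tag_alloc_policy = BLK_TAG_ALLOC_RR */
};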
@@ -1,46 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
*/
#include <linux/device.h>
#include <linux/blk-mq-virtio.h>
#include <linux/virtio_config.h>
#include <linux/module.h>
#include "blk-mq.h"

/**
* blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
* @qmap: CPU to hardware queue map.
* @vdev: virtio device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
* interrupt vectors as @set has queues. It will then query the vector
* corresponding to each queue for it's affinity mask and built queue mapping
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec)
{
const struct cpumask *mask;
unsigned int queue, cpu;

if (!vdev->config->get_vq_affinity)
goto fallback;

for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
if (!mask)
goto fallback;

for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}

return;

fallback:
blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
|
||||
if (!q->mq_freeze_depth) {
|
||||
q->mq_freeze_owner = owner;
|
||||
q->mq_freeze_owner_depth = 1;
|
||||
q->mq_freeze_disk_dead = !q->disk ||
|
||||
test_bit(GD_DEAD, &q->disk->state) ||
|
||||
!blk_queue_registered(q);
|
||||
q->mq_freeze_queue_dying = blk_queue_dying(q);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -142,8 +146,6 @@ static bool blk_freeze_set_owner(struct request_queue *q,
|
||||
/* verify the last unfreeze in owner context */
|
||||
static bool blk_unfreeze_check_owner(struct request_queue *q)
|
||||
{
|
||||
if (!q->mq_freeze_owner)
|
||||
return false;
|
||||
if (q->mq_freeze_owner != current)
|
||||
return false;
|
||||
if (--q->mq_freeze_owner_depth == 0) {
|
||||
@ -189,7 +191,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
|
||||
void blk_freeze_queue_start(struct request_queue *q)
|
||||
{
|
||||
if (__blk_freeze_queue_start(q, current))
|
||||
blk_freeze_acquire_lock(q, false, false);
|
||||
blk_freeze_acquire_lock(q);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
|
||||
|
||||
@ -237,7 +239,7 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
|
||||
void blk_mq_unfreeze_queue(struct request_queue *q)
|
||||
{
|
||||
if (__blk_mq_unfreeze_queue(q, false))
|
||||
blk_unfreeze_release_lock(q, false, false);
|
||||
blk_unfreeze_release_lock(q);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
|
||||
|
||||
@ -2656,8 +2658,10 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
|
||||
if (bio->bi_opf & REQ_RAHEAD)
|
||||
rq->cmd_flags |= REQ_FAILFAST_MASK;
|
||||
|
||||
rq->bio = rq->biotail = bio;
|
||||
rq->__sector = bio->bi_iter.bi_sector;
|
||||
blk_rq_bio_prep(rq, bio, nr_segs);
|
||||
rq->__data_len = bio->bi_iter.bi_size;
|
||||
rq->nr_phys_segments = nr_segs;
|
||||
if (bio_integrity(bio))
|
||||
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
|
||||
bio);
|
||||
@ -3472,8 +3476,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
|
||||
if (node == NUMA_NO_NODE)
|
||||
node = set->numa_node;
|
||||
|
||||
tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
|
||||
BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
|
||||
tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node);
|
||||
if (!tags)
|
||||
return NULL;
|
||||
|
||||
|
@@ -163,11 +163,8 @@ struct blk_mq_alloc_data {
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
unsigned int reserved_tags, int node, int alloc_policy);
unsigned int reserved_tags, unsigned int flags, int node);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
@@ -11,12 +11,8 @@
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/mempool.h>
@@ -463,6 +459,8 @@ static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
lockdep_assert_held(&zwplug->lock);

/* If the zone write plug was already removed, we are done. */
if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
return false;
@@ -584,6 +582,7 @@ static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
bio_io_error(bio);
disk_put_zone_wplug(zwplug);
/* Drop the reference taken by disk_zone_wplug_add_bio(() */
blk_queue_exit(q);
}

@@ -895,10 +894,7 @@ void blk_zone_write_plug_init_request(struct request *req)
break;
}

/*
* Drop the extra reference on the queue usage we got when
* plugging the BIO and advance the write pointer offset.
*/
/* Drop the reference taken by disk_zone_wplug_add_bio(). */
blk_queue_exit(q);
zwplug->wp_offset += bio_sectors(bio);

@@ -917,6 +913,8 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
{
struct gendisk *disk = bio->bi_bdev->bd_disk;

lockdep_assert_held(&zwplug->lock);

/*
* If we lost track of the zone write pointer due to a write error,
* the user must either execute a report zones, reset the zone or finish
@@ -1776,24 +1774,14 @@ int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);

#ifdef CONFIG_BLK_DEBUG_FS

int queue_zone_wplugs_show(void *data, struct seq_file *m)
static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug,
struct seq_file *m)
{
struct request_queue *q = data;
struct gendisk *disk = q->disk;
struct blk_zone_wplug *zwplug;
unsigned int zwp_wp_offset, zwp_flags;
unsigned int zwp_zone_no, zwp_ref;
unsigned int zwp_bio_list_size, i;
unsigned int zwp_bio_list_size;
unsigned long flags;

if (!disk->zone_wplugs_hash)
return 0;

rcu_read_lock();
for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
hlist_for_each_entry_rcu(zwplug,
&disk->zone_wplugs_hash[i], node) {
spin_lock_irqsave(&zwplug->lock, flags);
zwp_zone_no = zwplug->zone_no;
zwp_flags = zwplug->flags;
@@ -1802,11 +1790,25 @@ int queue_zone_wplugs_show(void *data, struct seq_file *m)
zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
spin_unlock_irqrestore(&zwplug->lock, flags);

seq_printf(m, "%u 0x%x %u %u %u\n",
zwp_zone_no, zwp_flags, zwp_ref,
seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref,
zwp_wp_offset, zwp_bio_list_size);
}
}
}

int queue_zone_wplugs_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct gendisk *disk = q->disk;
struct blk_zone_wplug *zwplug;
unsigned int i;

if (!disk->zone_wplugs_hash)
return 0;

rcu_read_lock();
for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i],
node)
queue_zone_wplug_show(zwplug, m);
rcu_read_unlock();

return 0;
block/blk.h (31 lines changed)
@@ -556,14 +556,6 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);

int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
struct folio *folio, size_t len, size_t offset,
unsigned int max_sectors, bool *same_page);

/*
* Clean up a page appropriately, where the page may be pinned, may have a
* ref taken on it or neither.
@@ -720,22 +712,29 @@ void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

static inline void blk_freeze_acquire_lock(struct request_queue *q, bool
disk_dead, bool queue_dying)
#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
if (!disk_dead)
if (!q->mq_freeze_disk_dead)
rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
if (!queue_dying)
if (!q->mq_freeze_queue_dying)
rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q, bool
disk_dead, bool queue_dying)
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
if (!queue_dying)
if (!q->mq_freeze_queue_dying)
rwsem_release(&q->q_lockdep_map, _RET_IP_);
if (!disk_dead)
if (!q->mq_freeze_disk_dead)
rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */
@@ -381,7 +381,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
set->flags = BLK_MQ_F_BLOCKING;
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
@@ -405,12 +405,12 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
#define to_elv(atr) container_of_const((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct elv_fs_entry *entry = to_elv(attr);
const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
ssize_t error;

@@ -428,7 +428,7 @@ static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct elv_fs_entry *entry = to_elv(attr);
const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
ssize_t error;

@@ -461,7 +461,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)

error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
const struct elv_fs_entry *attr = e->type->elevator_attrs;
if (attr) {
while (attr->attr.name) {
if (sysfs_create_file(&e->kobj, &attr->attr))
@@ -547,14 +547,6 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
if (!queue_is_mq(q) ||
(q->tag_set->flags & BLK_MQ_F_NO_SCHED))
return false;
return true;
}

/*
* For single queue devices, default to using mq-deadline. If we have multiple
* queues or mq-deadline is not available, default to "none".
@@ -580,9 +572,6 @@ void elevator_init_mq(struct request_queue *q)
struct elevator_type *e;
int err;

if (!elv_support_iosched(q))
return;

WARN_ON_ONCE(blk_queue_registered(q));

if (unlikely(q->elevator))
@@ -601,16 +590,13 @@ void elevator_init_mq(struct request_queue *q)
*
* Disk isn't added yet, so verifying queue lock only manually.
*/
blk_freeze_queue_start_non_owner(q);
blk_freeze_acquire_lock(q, true, false);
blk_mq_freeze_queue_wait(q);
blk_mq_freeze_queue(q);

blk_mq_cancel_work_sync(q);

err = blk_mq_init_sched(q, e);

blk_unfreeze_release_lock(q, true, false);
blk_mq_unfreeze_queue_non_owner(q);
blk_mq_unfreeze_queue(q);

if (err) {
pr_warn("\"%s\" elevator initialization failed, "
@@ -717,9 +703,6 @@ void elv_iosched_load_module(struct gendisk *disk, const char *buf,
struct elevator_type *found;
const char *name;

if (!elv_support_iosched(disk->queue))
return;

strscpy(elevator_name, buf, sizeof(elevator_name));
name = strstrip(elevator_name);

@@ -737,9 +720,6 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
char elevator_name[ELV_NAME_MAX];
int ret;

if (!elv_support_iosched(disk->queue))
return count;

strscpy(elevator_name, buf, sizeof(elevator_name));
ret = elevator_change(disk->queue, strstrip(elevator_name));
if (!ret)
@@ -754,9 +734,6 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
struct elevator_type *cur = NULL, *e;
int len = 0;

if (!elv_support_iosched(q))
return sprintf(name, "none\n");

if (!q->elevator) {
len += sprintf(name+len, "[none] ");
} else {
@@ -71,7 +71,7 @@ struct elevator_type

size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
const struct elv_fs_entry *elevator_attrs;
const char *elevator_name;
const char *elevator_alias;
struct module *elevator_owner;
block/fops.c (45 lines changed)
@@ -54,6 +54,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
struct bio bio;
ssize_t ret;

WARN_ON_ONCE(iocb->ki_flags & IOCB_HAS_METADATA);
if (nr_pages <= DIO_INLINE_BIO_VECS)
vecs = inline_vecs;
else {
@@ -124,12 +125,16 @@ static void blkdev_bio_end_io(struct bio *bio)
{
struct blkdev_dio *dio = bio->bi_private;
bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
bool is_sync = dio->flags & DIO_IS_SYNC;

if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_status = bio->bi_status;

if (!is_sync && (dio->iocb->ki_flags & IOCB_HAS_METADATA))
bio_integrity_unmap_user(bio);

if (atomic_dec_and_test(&dio->ref)) {
if (!(dio->flags & DIO_IS_SYNC)) {
if (!is_sync) {
struct kiocb *iocb = dio->iocb;
ssize_t ret;

@@ -221,14 +226,16 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
* a retry of this from blocking context.
*/
if (unlikely(iov_iter_count(iter))) {
bio_release_pages(bio, false);
bio_clear_flag(bio, BIO_REFFED);
bio_put(bio);
blk_finish_plug(&plug);
return -EAGAIN;
ret = -EAGAIN;
goto fail;
}
bio->bi_opf |= REQ_NOWAIT;
}
if (!is_sync && (iocb->ki_flags & IOCB_HAS_METADATA)) {
ret = bio_integrity_map_iter(bio, iocb->private);
if (unlikely(ret))
goto fail;
}

if (is_read) {
if (dio->flags & DIO_SHOULD_DIRTY)
@@ -269,6 +276,12 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,

bio_put(&dio->bio);
return ret;
fail:
bio_release_pages(bio, false);
bio_clear_flag(bio, BIO_REFFED);
bio_put(bio);
blk_finish_plug(&plug);
return ret;
}

static void blkdev_bio_end_io_async(struct bio *bio)
@@ -286,6 +299,9 @@ static void blkdev_bio_end_io_async(struct bio *bio)
ret = blk_status_to_errno(bio->bi_status);
}

if (iocb->ki_flags & IOCB_HAS_METADATA)
bio_integrity_unmap_user(bio);

iocb->ki_complete(iocb, ret);

if (dio->flags & DIO_SHOULD_DIRTY) {
@@ -330,10 +346,8 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
bio_iov_bvec_set(bio, iter);
} else {
ret = bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret)) {
bio_put(bio);
return ret;
}
if (unlikely(ret))
goto out_bio_put;
}
dio->size = bio->bi_iter.bi_size;

@@ -346,6 +360,13 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
task_io_account_write(bio->bi_iter.bi_size);
}

if (iocb->ki_flags & IOCB_HAS_METADATA) {
ret = bio_integrity_map_iter(bio, iocb->private);
WRITE_ONCE(iocb->private, NULL);
if (unlikely(ret))
goto out_bio_put;
}

if (iocb->ki_flags & IOCB_ATOMIC)
bio->bi_opf |= REQ_ATOMIC;

@@ -360,6 +381,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
submit_bio(bio);
}
return -EIOCBQUEUED;

out_bio_put:
bio_put(bio);
return ret;
}

static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -400,21 +400,23 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
struct device *ddev = disk_to_dev(disk);
int ret;

/* Only makes sense for bio-based to set ->poll_bio */
if (queue_is_mq(disk->queue) && disk->fops->poll_bio)
if (queue_is_mq(disk->queue)) {
/*
* ->submit_bio and ->poll_bio are bypassed for blk-mq drivers.
*/
if (disk->fops->submit_bio || disk->fops->poll_bio)
return -EINVAL;

/*
* The disk queue should now be all set with enough information about
* the device for the elevator code to pick an adequate default
* elevator if one is needed, that is, for devices requesting queue
* registration.
* Initialize the I/O scheduler code and pick a default one if
* needed.
*/
elevator_init_mq(disk->queue);

/* Mark bdev as having a submit_bio, if needed */
if (disk->fops->submit_bio)
} else {
if (!disk->fops->submit_bio)
return -EINVAL;
bdev_set_flag(disk->part0, BD_HAS_SUBMIT_BIO);
}

/*
* If the driver provides an explicit major number it also must provide
@@ -661,7 +663,7 @@ void del_gendisk(struct gendisk *disk)
struct request_queue *q = disk->queue;
struct block_device *part;
unsigned long idx;
bool start_drain, queue_dying;
bool start_drain;

might_sleep();

@@ -690,9 +692,8 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
start_drain = __blk_mark_disk_dead(disk);
queue_dying = blk_queue_dying(q);
if (start_drain)
blk_freeze_acquire_lock(q, true, queue_dying);
blk_freeze_acquire_lock(q);
xa_for_each_start(&disk->part_tbl, idx, part, 1)
drop_partition(part);
mutex_unlock(&disk->open_mutex);
@@ -748,7 +749,7 @@ void del_gendisk(struct gendisk *disk)
blk_mq_exit_queue(q);

if (start_drain)
blk_unfreeze_release_lock(q, true, queue_dying);
blk_unfreeze_release_lock(q);
}
EXPORT_SYMBOL(del_gendisk);

@@ -798,7 +799,7 @@ static ssize_t disk_badblocks_store(struct device *dev,
}

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
void blk_request_module(dev_t devt)
static bool blk_probe_dev(dev_t devt)
{
unsigned int major = MAJOR(devt);
struct blk_major_name **n;
@@ -808,14 +809,26 @@ void blk_request_module(dev_t devt)
if ((*n)->major == major && (*n)->probe) {
(*n)->probe(devt);
mutex_unlock(&major_names_lock);
return;
return true;
}
}
mutex_unlock(&major_names_lock);
return false;
}

if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
void blk_request_module(dev_t devt)
{
int error;

if (blk_probe_dev(devt))
return;

error = request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt));
/* Make old-style 2.4 aliases work */
request_module("block-major-%d", MAJOR(devt));
if (error > 0)
error = request_module("block-major-%d", MAJOR(devt));
if (!error)
blk_probe_dev(devt);
}
#endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */
@@ -889,7 +889,7 @@ KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
static const struct elv_fs_entry kyber_sched_attrs[] = {
KYBER_LAT_ATTR(read),
KYBER_LAT_ATTR(write),
__ATTR_NULL
@@ -834,7 +834,7 @@ STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#define DD_ATTR(name) \
__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
static const struct elv_fs_entry deadline_attrs[] = {
DD_ATTR(read_expire),
DD_ATTR(write_expire),
DD_ATTR(writes_starved),
@@ -396,7 +396,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure

extern struct ata_port_operations ahci_ops;
@@ -935,7 +935,7 @@ static const struct scsi_host_template pata_macio_sht = {
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.tag_alloc_policy_rr = true,
};

static struct ata_port_operations pata_macio_ops = {
@@ -672,7 +672,7 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.tag_alloc_policy_rr = true,
.device_configure = ata_scsi_device_configure
};
@@ -385,7 +385,7 @@ static const struct scsi_host_template nv_adma_sht = {
.device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.tag_alloc_policy_rr = true,
};

static const struct scsi_host_template nv_swncq_sht = {
@@ -396,7 +396,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.tag_alloc_policy_rr = true,
};

/*
@ -378,7 +378,6 @@ static const struct scsi_host_template sil24_sht = {
|
||||
.can_queue = SIL24_MAX_CMDS,
|
||||
.sg_tablesize = SIL24_MAX_SGE,
|
||||
.dma_boundary = ATA_DMA_BOUNDARY,
|
||||
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
|
||||
.sdev_groups = ata_ncq_sdev_groups,
|
||||
.change_queue_depth = ata_scsi_change_queue_depth,
|
||||
.device_configure = ata_scsi_device_configure
|
||||
|
@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
|
||||
unit[drive].tag_set.nr_maps = 1;
|
||||
unit[drive].tag_set.queue_depth = 2;
|
||||
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
|
||||
unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
|
||||
goto out_cleanup_trackbuf;
|
||||
|
||||
|
@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
|
||||
set->nr_hw_queues = 1;
|
||||
set->queue_depth = 128;
|
||||
set->numa_node = NUMA_NO_NODE;
|
||||
set->flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
err = blk_mq_alloc_tag_set(set);
|
||||
if (err) {
|
||||
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
|
||||
|
@ -2088,7 +2088,6 @@ static int __init atari_floppy_init (void)
|
||||
unit[i].tag_set.nr_maps = 1;
|
||||
unit[i].tag_set.queue_depth = 2;
|
||||
unit[i].tag_set.numa_node = NUMA_NO_NODE;
|
||||
unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
|
||||
tag_sets[drive].nr_maps = 1;
|
||||
tag_sets[drive].queue_depth = 2;
|
||||
tag_sets[drive].numa_node = NUMA_NO_NODE;
|
||||
tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
|
||||
if (err)
|
||||
goto out_put_disk;
|
||||
|
@ -2023,8 +2023,7 @@ static int loop_add(int i)
|
||||
lo->tag_set.queue_depth = hw_queue_depth;
|
||||
lo->tag_set.numa_node = NUMA_NO_NODE;
|
||||
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
|
||||
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
|
||||
BLK_MQ_F_NO_SCHED_BY_DEFAULT;
|
||||
lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
|
||||
lo->tag_set.driver_data = lo;
|
||||
|
||||
err = blk_mq_alloc_tag_set(&lo->tag_set);
|
||||
|
@ -3416,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||
dd->tags.reserved_tags = 1;
|
||||
dd->tags.cmd_size = sizeof(struct mtip_cmd);
|
||||
dd->tags.numa_node = dd->numa_node;
|
||||
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
dd->tags.driver_data = dd;
|
||||
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
|
||||
|
||||
|
@ -1841,8 +1841,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
|
||||
nbd->tag_set.queue_depth = 128;
|
||||
nbd->tag_set.numa_node = NUMA_NO_NODE;
|
||||
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
|
||||
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
|
||||
BLK_MQ_F_BLOCKING;
|
||||
nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
|
||||
nbd->tag_set.driver_data = nbd;
|
||||
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
|
||||
nbd->backend = NULL;
|
||||
@ -2180,6 +2179,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
|
||||
flush_workqueue(nbd->recv_workq);
|
||||
nbd_clear_que(nbd);
|
||||
nbd->task_setup = NULL;
|
||||
clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
|
||||
mutex_unlock(&nbd->config_lock);
|
||||
|
||||
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
|
||||
|
@ -266,6 +266,10 @@ static bool g_zone_full;
|
||||
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
|
||||
|
||||
static bool g_rotational;
|
||||
module_param_named(rotational, g_rotational, bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
|
||||
|
||||
static struct nullb_device *null_alloc_dev(void);
|
||||
static void null_free_dev(struct nullb_device *dev);
|
||||
static void null_del_dev(struct nullb *nullb);
|
||||
@ -468,6 +472,7 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
|
||||
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
|
||||
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
|
||||
NULLB_DEVICE_ATTR(fua, bool, NULL);
|
||||
NULLB_DEVICE_ATTR(rotational, bool, NULL);
|
||||
|
||||
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
|
||||
{
|
||||
@ -621,6 +626,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
|
||||
&nullb_device_attr_shared_tags,
|
||||
&nullb_device_attr_shared_tag_bitmap,
|
||||
&nullb_device_attr_fua,
|
||||
&nullb_device_attr_rotational,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -706,7 +712,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
|
||||
"shared_tags,size,submit_queues,use_per_node_hctx,"
|
||||
"virt_boundary,zoned,zone_capacity,zone_max_active,"
|
||||
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
|
||||
"zone_size,zone_append_max_sectors,zone_full\n");
|
||||
"zone_size,zone_append_max_sectors,zone_full,"
|
||||
"rotational\n");
|
||||
}
|
||||
|
||||
CONFIGFS_ATTR_RO(memb_group_, features);
|
||||
@ -793,6 +800,7 @@ static struct nullb_device *null_alloc_dev(void)
|
||||
dev->shared_tags = g_shared_tags;
|
||||
dev->shared_tag_bitmap = g_shared_tag_bitmap;
|
||||
dev->fua = g_fua;
|
||||
dev->rotational = g_rotational;
|
||||
|
||||
return dev;
|
||||
}
|
||||
@ -899,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
|
||||
if (radix_tree_insert(root, idx, t_page)) {
|
||||
null_free_page(t_page);
|
||||
t_page = radix_tree_lookup(root, idx);
|
||||
WARN_ON(!t_page || t_page->page->index != idx);
|
||||
WARN_ON(!t_page || t_page->page->private != idx);
|
||||
} else if (is_cache)
|
||||
nullb->dev->curr_cache += PAGE_SIZE;
|
||||
|
||||
@ -922,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
|
||||
(void **)t_pages, pos, FREE_BATCH);
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
pos = t_pages[i]->page->index;
|
||||
pos = t_pages[i]->page->private;
|
||||
ret = radix_tree_delete_item(root, pos, t_pages[i]);
|
||||
WARN_ON(ret != t_pages[i]);
|
||||
null_free_page(ret);
|
||||
@ -948,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
|
||||
|
||||
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
|
||||
t_page = radix_tree_lookup(root, idx);
|
||||
WARN_ON(t_page && t_page->page->index != idx);
|
||||
WARN_ON(t_page && t_page->page->private != idx);
|
||||
|
||||
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
|
||||
return t_page;
|
||||
@ -991,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
|
||||
|
||||
spin_lock_irq(&nullb->lock);
|
||||
idx = sector >> PAGE_SECTORS_SHIFT;
|
||||
t_page->page->index = idx;
|
||||
t_page->page->private = idx;
|
||||
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
|
||||
radix_tree_preload_end();
|
||||
|
||||
@ -1011,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
|
||||
struct nullb_page *t_page, *ret;
|
||||
void *dst, *src;
|
||||
|
||||
idx = c_page->page->index;
|
||||
idx = c_page->page->private;
|
||||
|
||||
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
|
||||
|
||||
@ -1070,7 +1078,7 @@ again:
|
||||
* avoid race, we don't allow page free
|
||||
*/
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
nullb->cache_flush_pos = c_pages[i]->page->index;
|
||||
nullb->cache_flush_pos = c_pages[i]->page->private;
|
||||
/*
|
||||
* We found the page which is being flushed to disk by other
|
||||
* threads
|
||||
@ -1783,9 +1791,8 @@ static int null_init_global_tag_set(void)
|
||||
tag_set.nr_hw_queues = g_submit_queues;
|
||||
tag_set.queue_depth = g_hw_queue_depth;
|
||||
tag_set.numa_node = g_home_node;
|
||||
tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
if (g_no_sched)
|
||||
tag_set.flags |= BLK_MQ_F_NO_SCHED;
|
||||
tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
|
||||
if (g_shared_tag_bitmap)
|
||||
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
|
||||
if (g_blocking)
|
||||
@ -1809,9 +1816,8 @@ static int null_setup_tagset(struct nullb *nullb)
|
||||
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
|
||||
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
|
||||
nullb->tag_set->numa_node = nullb->dev->home_node;
|
||||
nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
if (nullb->dev->no_sched)
|
||||
nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
|
||||
nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
|
||||
if (nullb->dev->shared_tag_bitmap)
|
||||
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
|
||||
if (nullb->dev->blocking)
|
||||
@ -1938,6 +1944,9 @@ static int null_add_dev(struct nullb_device *dev)
|
||||
lim.features |= BLK_FEAT_FUA;
|
||||
}
|
||||
|
||||
if (dev->rotational)
|
||||
lim.features |= BLK_FEAT_ROTATIONAL;
|
||||
|
||||
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
|
||||
if (IS_ERR(nullb->disk)) {
|
||||
rv = PTR_ERR(nullb->disk);
|
||||
|
@ -107,6 +107,7 @@ struct nullb_device {
|
||||
bool shared_tags; /* share tag set between devices for blk-mq */
|
||||
bool shared_tag_bitmap; /* use hostwide shared tags */
|
||||
bool fua; /* Support FUA */
|
||||
bool rotational; /* Fake rotational device */
|
||||
};
|
||||
|
||||
struct nullb {
|
||||
|
@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
|
||||
unsigned int devidx;
|
||||
struct queue_limits lim = {
|
||||
.logical_block_size = dev->blk_size,
|
||||
.max_hw_sectors = dev->bounce_size >> 9,
|
||||
.max_hw_sectors = BOUNCE_SIZE >> 9,
|
||||
.max_segments = -1,
|
||||
.max_segment_size = dev->bounce_size,
|
||||
.max_segment_size = BOUNCE_SIZE,
|
||||
.dma_alignment = dev->blk_size - 1,
|
||||
.features = BLK_FEAT_WRITE_CACHE |
|
||||
BLK_FEAT_ROTATIONAL,
|
||||
@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
|
||||
|
||||
ps3disk_identify(dev);
|
||||
|
||||
error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
|
||||
BLK_MQ_F_SHOULD_MERGE);
|
||||
error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
|
||||
if (error)
|
||||
goto fail_teardown;
|
||||
|
||||
|
@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
|
||||
rbd_dev->tag_set.ops = &rbd_mq_ops;
|
||||
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
|
||||
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
|
||||
rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
|
||||
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
|
||||
|
||||
|
@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
|
||||
tag_set->ops = &rnbd_mq_ops;
|
||||
tag_set->queue_depth = sess->queue_depth;
|
||||
tag_set->numa_node = NUMA_NO_NODE;
|
||||
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
|
||||
BLK_MQ_F_TAG_QUEUE_SHARED;
|
||||
tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
|
||||
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
|
||||
|
||||
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
|
||||
|
@ -167,7 +167,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
|
||||
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
|
||||
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
|
||||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
|
||||
bio_set_prio(bio, prio);
|
||||
bio->bi_ioprio = prio;
|
||||
|
||||
submit_bio(bio);
|
||||
|
||||
|
@ -32,25 +32,31 @@ module! {
|
||||
license: "GPL v2",
|
||||
}
|
||||
|
||||
#[pin_data]
|
||||
struct NullBlkModule {
|
||||
_disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>,
|
||||
#[pin]
|
||||
_disk: Mutex<GenDisk<NullBlkDevice>>,
|
||||
}
|
||||
|
||||
impl kernel::Module for NullBlkModule {
|
||||
fn init(_module: &'static ThisModule) -> Result<Self> {
|
||||
impl kernel::InPlaceModule for NullBlkModule {
|
||||
fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
|
||||
pr_info!("Rust null_blk loaded\n");
|
||||
|
||||
// Use a immediately-called closure as a stable `try` block
|
||||
let disk = /* try */ (|| {
|
||||
let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
|
||||
|
||||
let disk = gen_disk::GenDiskBuilder::new()
|
||||
gen_disk::GenDiskBuilder::new()
|
||||
.capacity_sectors(4096 << 11)
|
||||
.logical_block_size(4096)?
|
||||
.physical_block_size(4096)?
|
||||
.rotational(false)
|
||||
.build(format_args!("rnullb{}", 0), tagset)?;
|
||||
.build(format_args!("rnullb{}", 0), tagset)
|
||||
})();
|
||||
|
||||
let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
|
||||
|
||||
Ok(Self { _disk: disk })
|
||||
try_pin_init!(Self {
|
||||
_disk <- new_mutex!(disk?, "nullb:disk"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
|
||||
}
|
||||
|
||||
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
|
||||
VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
|
||||
VDC_TX_RING_SIZE, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
|
||||
|
||||
for (drive = 0; drive < swd->floppy_count; drive++) {
|
||||
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
|
||||
&swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
|
||||
&swim_mq_ops, 2, 0);
|
||||
if (err)
|
||||
goto exit_put_disks;
|
||||
|
||||
|
@ -1208,8 +1208,7 @@ static int swim3_attach(struct macio_dev *mdev,
|
||||
fs = &floppy_states[floppy_count];
|
||||
memset(fs, 0, sizeof(*fs));
|
||||
|
||||
rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
|
||||
BLK_MQ_F_SHOULD_MERGE);
|
||||
rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
|
||||
if (rc)
|
||||
goto out_unregister;
|
||||
|
||||
|
@ -2213,7 +2213,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
|
||||
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
|
||||
ub->tag_set.numa_node = NUMA_NO_NODE;
|
||||
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
|
||||
ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
ub->tag_set.driver_data = ub;
|
||||
return blk_mq_alloc_tag_set(&ub->tag_set);
|
||||
}
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <linux/string_helpers.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/blk-mq-virtio.h>
|
||||
#include <linux/numa.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <uapi/linux/virtio_ring.h>
|
||||
@ -1181,7 +1180,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
|
||||
if (i == HCTX_TYPE_POLL)
|
||||
blk_mq_map_queues(&set->map[i]);
|
||||
else
|
||||
blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
|
||||
blk_mq_map_hw_queues(&set->map[i],
|
||||
&vblk->vdev->dev, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1481,7 +1481,6 @@ static int virtblk_probe(struct virtio_device *vdev)
|
||||
vblk->tag_set.ops = &virtio_mq_ops;
|
||||
vblk->tag_set.queue_depth = queue_depth;
|
||||
vblk->tag_set.numa_node = NUMA_NO_NODE;
|
||||
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
vblk->tag_set.cmd_size =
|
||||
sizeof(struct virtblk_req) +
|
||||
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
|
||||
|
@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
|
||||
} else
|
||||
info->tag_set.queue_depth = BLK_RING_SIZE(info);
|
||||
info->tag_set.numa_node = NUMA_NO_NODE;
|
||||
info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
info->tag_set.cmd_size = sizeof(struct blkif_req);
|
||||
info->tag_set.driver_data = info;
|
||||
|
||||
|
@ -354,7 +354,6 @@ static int __init z2_init(void)
|
||||
tag_set.nr_maps = 1;
|
||||
tag_set.queue_depth = 16;
|
||||
tag_set.numa_node = NUMA_NO_NODE;
|
||||
tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
ret = blk_mq_alloc_tag_set(&tag_set);
|
||||
if (ret)
|
||||
goto out_unregister_blkdev;
|
||||
|
@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
|
||||
probe_gdrom_setupcd();
|
||||
|
||||
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
|
||||
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
|
||||
BLK_MQ_F_BLOCKING);
|
||||
if (err)
|
||||
goto probe_fail_free_cd_info;
|
||||
|
||||
|
@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
|
||||
bio_init(bio, NULL, bio->bi_inline_vecs,
|
||||
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
|
||||
bio_get(bio);
|
||||
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
|
||||
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
|
||||
|
||||
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
|
||||
bio->bi_private = &io->cl;
|
||||
|
@ -334,7 +334,7 @@ static void dirty_init(struct keybuf_key *w)
|
||||
bio_init(bio, NULL, bio->bi_inline_vecs,
|
||||
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
|
||||
if (!io->dc->writeback_percent)
|
||||
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
|
||||
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
|
||||
|
||||
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
|
||||
bio->bi_private = w;
|
||||
|
@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
|
||||
md->tag_set->ops = &dm_mq_ops;
|
||||
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
|
||||
md->tag_set->numa_node = md->numa_node_id;
|
||||
md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
|
||||
md->tag_set->flags = BLK_MQ_F_STACKING;
|
||||
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
|
||||
md->tag_set->driver_data = md;
|
||||
|
||||
|
@ -122,7 +122,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
|
||||
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
|
||||
|
||||
par = fec_read_parity(v, rsb, block_offset, &offset,
|
||||
par_buf_offset, &buf, bio_prio(bio));
|
||||
par_buf_offset, &buf, bio->bi_ioprio);
|
||||
if (IS_ERR(par))
|
||||
return PTR_ERR(par);
|
||||
|
||||
@ -164,7 +164,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
|
||||
dm_bufio_release(buf);
|
||||
|
||||
par = fec_read_parity(v, rsb, block_offset, &offset,
|
||||
par_buf_offset, &buf, bio_prio(bio));
|
||||
par_buf_offset, &buf, bio->bi_ioprio);
|
||||
if (IS_ERR(par))
|
||||
return PTR_ERR(par);
|
||||
}
|
||||
@ -254,7 +254,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
|
||||
bufio = v->bufio;
|
||||
}
|
||||
|
||||
bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
|
||||
bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio->bi_ioprio);
|
||||
if (IS_ERR(bbuf)) {
|
||||
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
|
||||
v->data_dev->name,
|
||||
|
@ -321,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
|
||||
}
|
||||
} else {
|
||||
data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
|
||||
&buf, bio_prio(bio));
|
||||
&buf, bio->bi_ioprio);
|
||||
}
|
||||
|
||||
if (IS_ERR(data))
|
||||
@ -789,7 +789,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
|
||||
|
||||
verity_fec_init_io(io);
|
||||
|
||||
verity_submit_prefetch(v, io, bio_prio(bio));
|
||||
verity_submit_prefetch(v, io, bio->bi_ioprio);
|
||||
|
||||
submit_bio_noacct(bio);
|
||||
|
||||
|
@ -2094,8 +2094,7 @@ static int msb_init_disk(struct memstick_dev *card)
|
||||
if (msb->disk_id < 0)
|
||||
return msb->disk_id;
|
||||
|
||||
rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
|
||||
BLK_MQ_F_SHOULD_MERGE);
|
||||
rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
|
||||
if (rc)
|
||||
goto out_release_id;
|
||||
|
||||
|
@ -1139,8 +1139,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
|
||||
if (disk_id < 0)
|
||||
return disk_id;
|
||||
|
||||
rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
|
||||
BLK_MQ_F_SHOULD_MERGE);
|
||||
rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0);
|
||||
if (rc)
|
||||
goto out_release_id;
|
||||
|
||||
|
@ -441,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
|
||||
else
|
||||
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
|
||||
mq->tag_set.numa_node = NUMA_NO_NODE;
|
||||
mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
|
||||
mq->tag_set.flags = BLK_MQ_F_BLOCKING;
|
||||
mq->tag_set.nr_hw_queues = 1;
|
||||
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
|
||||
mq->tag_set.driver_data = mq;
|
||||
|
@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
|
||||
goto out_list_del;
|
||||
|
||||
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
|
||||
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
|
||||
BLK_MQ_F_BLOCKING);
|
||||
if (ret)
|
||||
goto out_kfree_tag_set;
|
||||
|
||||
|
@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
|
||||
dev->tag_set.ops = &ubiblock_mq_ops;
|
||||
dev->tag_set.queue_depth = 64;
|
||||
dev->tag_set.numa_node = NUMA_NO_NODE;
|
||||
dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
|
||||
dev->tag_set.flags = BLK_MQ_F_BLOCKING;
|
||||
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
|
||||
dev->tag_set.driver_data = dev;
|
||||
dev->tag_set.nr_hw_queues = 1;
|
||||
|
@ -1251,7 +1251,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
|
||||
anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
|
||||
anv->admin_tagset.numa_node = NUMA_NO_NODE;
|
||||
anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
|
||||
anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
|
||||
anv->admin_tagset.driver_data = &anv->adminq;
|
||||
|
||||
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
|
||||
@ -1275,7 +1274,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
|
||||
anv->tagset.timeout = NVME_IO_TIMEOUT;
|
||||
anv->tagset.numa_node = NUMA_NO_NODE;
|
||||
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
|
||||
anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
anv->tagset.driver_data = &anv->ioq;
|
||||
|
||||
ret = blk_mq_alloc_tag_set(&anv->tagset);
|
||||
|
@ -885,6 +885,12 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}

static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
{
cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
cmnd->rw.lbatm = cpu_to_le16(0xffff);
}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
struct request *req)
{
@ -1017,18 +1023,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
control |= NVME_RW_PRINFO_PRACT;
}

switch (ns->head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
control |= NVME_RW_PRINFO_PRCHK_GUARD;
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
control |= NVME_RW_PRINFO_PRCHK_REF;
if (op == nvme_cmd_zone_append)
control |= NVME_RW_APPEND_PIREMAP;
nvme_set_ref_tag(ns, cmnd, req);
break;
}
if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
control |= NVME_RW_PRINFO_PRCHK_APP;
nvme_set_app_tag(req, cmnd);
}
}
|
||||
|
||||
@ -4564,7 +4569,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
|
||||
/* Reserved for fabric connect and keep alive */
|
||||
set->reserved_tags = 2;
|
||||
set->numa_node = ctrl->numa_node;
|
||||
set->flags = BLK_MQ_F_NO_SCHED;
|
||||
if (ctrl->ops->flags & NVME_F_BLOCKING)
|
||||
set->flags |= BLK_MQ_F_BLOCKING;
|
||||
set->cmd_size = cmd_size;
|
||||
@ -4639,7 +4643,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
|
||||
/* Reserved for fabric connect */
|
||||
set->reserved_tags = 1;
|
||||
set->numa_node = ctrl->numa_node;
|
||||
set->flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
if (ctrl->ops->flags & NVME_F_BLOCKING)
|
||||
set->flags |= BLK_MQ_F_BLOCKING;
|
||||
set->cmd_size = cmd_size;
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/nvme-fc.h>
|
||||
#include "fc.h"
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
|
||||
/* *************************** Data Structures/Defines ****************** */
|
||||
|
||||
|
@ -8,7 +8,6 @@
|
||||
#include <linux/async.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/blk-integrity.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/init.h>
|
||||
@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
|
||||
*/
|
||||
map->queue_offset = qoff;
|
||||
if (i != HCTX_TYPE_POLL && offset)
|
||||
blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
|
||||
blk_mq_map_hw_queues(map, dev->dev, offset);
|
||||
else
|
||||
blk_mq_map_queues(map);
|
||||
qoff += map->nr_queues;
|
||||
|
@ -261,6 +261,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
struct bio *bio;
|
||||
int ret = -EINVAL;
|
||||
int i;
|
||||
|
||||
if (req->sg_cnt > BIO_MAX_VECS)
|
||||
@ -277,16 +278,19 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
|
||||
}
|
||||
|
||||
for_each_sg(req->sg, sg, req->sg_cnt, i) {
|
||||
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
|
||||
sg->offset) < sg->length) {
|
||||
nvmet_req_bio_put(req, bio);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) <
|
||||
sg->length)
|
||||
goto out_bio_put;
|
||||
}
|
||||
|
||||
blk_rq_bio_prep(rq, bio, req->sg_cnt);
|
||||
|
||||
ret = blk_rq_append_bio(rq, bio);
|
||||
if (ret)
|
||||
goto out_bio_put;
|
||||
return 0;
|
||||
|
||||
out_bio_put:
|
||||
nvmet_req_bio_put(req, bio);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
|
||||
|
@ -586,8 +586,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
|
||||
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
|
||||
unsigned int len = sg->length;
|
||||
|
||||
if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
|
||||
sg_page(sg), len, sg->offset) != len) {
|
||||
if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
|
||||
status = NVME_SC_INTERNAL;
|
||||
goto out_put_bio;
|
||||
}
|
||||
|
@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}

/*
 * pci_device_irq_get_affinity - get IRQ affinity mask for device
 * @dev: ptr to dev structure
 * @irq_vec: interrupt vector number
 *
 * Return the CPU affinity mask for @dev and @irq_vec.
 */
static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
unsigned int irq_vec)
{
return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
}

const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
.irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
|
||||
|
@ -56,7 +56,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
|
||||
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
|
||||
block->tag_set.nr_hw_queues = nr_hw_queues;
|
||||
block->tag_set.queue_depth = queue_depth;
|
||||
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
block->tag_set.numa_node = NUMA_NO_NODE;
|
||||
rc = blk_mq_alloc_tag_set(&block->tag_set);
|
||||
if (rc)
|
||||
|
@ -461,7 +461,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
|
||||
bdev->tag_set.cmd_size = sizeof(blk_status_t);
|
||||
bdev->tag_set.nr_hw_queues = nr_requests;
|
||||
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
|
||||
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
bdev->tag_set.numa_node = NUMA_NO_NODE;
|
||||
|
||||
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <scsi/fc/fc_fip.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
|
||||
return;
|
||||
}
|
||||
|
||||
blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
|
||||
blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
|
||||
}
|
||||
|
||||
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
@ -9,7 +9,6 @@
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/dmapool.h>
|
||||
|
@ -3328,7 +3328,7 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
|
||||
if (i == HCTX_TYPE_POLL)
|
||||
blk_mq_map_queues(qmap);
|
||||
else
|
||||
blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
|
||||
blk_mq_map_hw_queues(qmap, hisi_hba->dev,
|
||||
BASE_VECTORS_V3_HW);
|
||||
qoff += qmap->nr_queues;
|
||||
}
|
||||
@ -3345,7 +3345,7 @@ static const struct scsi_host_template sht_v3_hw = {
|
||||
.slave_alloc = hisi_sas_slave_alloc,
|
||||
.shost_groups = host_v3_hw_groups,
|
||||
.sdev_groups = sdev_groups_v3_hw,
|
||||
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
|
||||
.tag_alloc_policy_rr = true,
|
||||
.host_reset = hisi_sas_host_reset,
|
||||
.host_tagset = 1,
|
||||
.mq_poll = queue_complete_v3_hw,
|
||||
|
@ -37,7 +37,6 @@
|
||||
#include <linux/poll.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/irq_poll.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
@ -3193,7 +3192,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
|
||||
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
|
||||
map->nr_queues = instance->msix_vectors - offset;
|
||||
map->queue_offset = 0;
|
||||
blk_mq_pci_map_queues(map, instance->pdev, offset);
|
||||
blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
|
||||
qoff += map->nr_queues;
|
||||
offset += map->nr_queues;
|
||||
|
||||
|
@ -12,7 +12,6 @@
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/errno.h>
|
||||
|
@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
|
||||
*/
|
||||
map->queue_offset = qoff;
|
||||
if (i != HCTX_TYPE_POLL)
|
||||
blk_mq_pci_map_queues(map, mrioc->pdev, offset);
|
||||
blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
|
||||
else
|
||||
blk_mq_map_queues(map);
|
||||
|
||||
|
@ -53,7 +53,6 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/raid_class.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/unaligned.h>
|
||||
|
||||
#include "mpt3sas_base.h"
|
||||
@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
|
||||
*/
|
||||
map->queue_offset = qoff;
|
||||
if (i != HCTX_TYPE_POLL)
|
||||
blk_mq_pci_map_queues(map, ioc->pdev, offset);
|
||||
blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
|
||||
else
|
||||
blk_mq_map_queues(map);
|
||||
|
||||
|
@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
|
||||
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
|
||||
|
||||
if (pm8001_ha->number_of_intr > 1) {
|
||||
blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
|
||||
blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -56,7 +56,6 @@
|
||||
#include <scsi/sas_ata.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include "pm8001_defs.h"
|
||||
|
||||
#define DRV_NAME "pm80xx"
|
||||
|
@ -8,7 +8,6 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/nvme.h>
|
||||
#include <linux/nvme-fc.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/blk-mq.h>
|
||||
|
||||
static struct nvme_fc_port_template qla_nvme_fc_transport;
|
||||
@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
|
||||
{
|
||||
struct scsi_qla_host *vha = lport->private;
|
||||
|
||||
blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
|
||||
blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
|
||||
}
|
||||
|
||||
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/trace_events.h>
|
||||
@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
|
||||
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
|
||||
blk_mq_map_queues(qmap);
|
||||
else
|
||||
blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
|
||||
blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
|
||||
vha->irq_offset);
|
||||
}
|
||||
|
||||
struct scsi_host_template qla2xxx_driver_template = {
|
||||
|
@ -2068,9 +2068,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
|
||||
tag_set->queue_depth = shost->can_queue;
|
||||
tag_set->cmd_size = cmd_size;
|
||||
tag_set->numa_node = dev_to_node(shost->dma_dev);
|
||||
tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
tag_set->flags |=
|
||||
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
|
||||
if (shost->hostt->tag_alloc_policy_rr)
|
||||
tag_set->flags |= BLK_MQ_F_TAG_RR;
|
||||
if (shost->queuecommand_may_block)
|
||||
tag_set->flags |= BLK_MQ_F_BLOCKING;
|
||||
tag_set->driver_data = shost;
|
||||
|
@ -814,14 +814,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
|
||||
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
|
||||
scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
|
||||
|
||||
if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
|
||||
if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
|
||||
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
|
||||
}
|
||||
|
||||
if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
|
||||
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
|
||||
|
||||
if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
|
||||
if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
|
||||
scmd->prot_flags |= SCSI_PROT_REF_CHECK;
|
||||
}
|
||||
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include <linux/bcd.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/cciss_ioctl.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
@ -6547,10 +6546,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
|
||||
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
|
||||
|
||||
if (!ctrl_info->disable_managed_interrupts)
|
||||
return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
|
||||
ctrl_info->pci_dev, 0);
|
||||
blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
|
||||
&ctrl_info->pci_dev->dev, 0);
|
||||
else
|
||||
return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
|
||||
blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
|
||||
}
|
||||
|
||||
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
|
||||
|
@ -29,7 +29,6 @@
|
||||
#include <scsi/scsi_tcq.h>
|
||||
#include <scsi/scsi_devinfo.h>
|
||||
#include <linux/seqlock.h>
|
||||
#include <linux/blk-mq-virtio.h>
|
||||
|
||||
#include "sd.h"
|
||||
|
||||
@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
|
||||
if (i == HCTX_TYPE_POLL)
|
||||
blk_mq_map_queues(map);
|
||||
else
|
||||
blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
|
||||
blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -823,7 +823,6 @@ static sense_reason_t
|
||||
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
||||
struct request *req)
|
||||
{
|
||||
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
|
||||
struct bio *bio = NULL;
|
||||
struct page *page;
|
||||
struct scatterlist *sg;
|
||||
@ -871,12 +870,11 @@ new_bio:
|
||||
(rw) ? "rw" : "r", nr_vecs);
|
||||
}
|
||||
|
||||
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
|
||||
pr_debug("PSCSI: Calling bio_add_page() i: %d"
|
||||
" bio: %p page: %p len: %d off: %d\n", i, bio,
|
||||
page, len, off);
|
||||
|
||||
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
|
||||
bio, page, bytes, off);
|
||||
rc = bio_add_page(bio, page, bytes, off);
|
||||
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
|
||||
bio_segments(bio), nr_vecs);
|
||||
if (rc != bytes) {
|
||||
|
@ -10411,7 +10411,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
|
||||
.nr_hw_queues = 1,
|
||||
.queue_depth = hba->nutmrs,
|
||||
.ops = &ufshcd_tmf_ops,
|
||||
.flags = BLK_MQ_F_NO_SCHED,
|
||||
};
|
||||
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
|
||||
if (err < 0)
|
||||
|
@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}

/*
 * virtio_irq_get_affinity - get IRQ affinity mask for device
 * @_d: ptr to dev structure
 * @irq_vec: interrupt vector number
 *
 * Return the CPU affinity mask for @_d and @irq_vec.
 */
static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
unsigned int irq_vec)
{
struct virtio_device *dev = dev_to_virtio(_d);

if (!dev->config->get_vq_affinity)
return NULL;

return dev->config->get_vq_affinity(dev, irq_vec);
}

static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = {
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
.irq_get_affinity = virtio_irq_get_affinity,
};

int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
|
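Not part of the series: both hunks above give a bus_type an .irq_get_affinity() callback so queue mappings can be derived from a plain struct device. A sketch of how a hypothetical bus that tracks one cpumask per vector might wire this up; returning NULL, as the virtio helper does when no affinity data exists, presumably lets blk_mq_map_hw_queues() fall back to a generic spread:

#include <linux/device.h>
#include <linux/cpumask.h>

struct foo_device {
        struct device dev;
        struct cpumask *vec_masks;      /* one mask per allocated vector */
        unsigned int nr_vecs;
};

#define dev_to_foo(d) container_of(d, struct foo_device, dev)

static const struct cpumask *foo_irq_get_affinity(struct device *dev,
                                                  unsigned int irq_vec)
{
        struct foo_device *fdev = dev_to_foo(dev);

        if (irq_vec >= fdev->nr_vecs)
                return NULL;    /* no affinity information for this vector */
        return &fdev->vec_masks[irq_vec];
}

/* Only the fields relevant to this sketch; a real bus fills in the rest. */
static const struct bus_type foo_bus_type = {
        .name                   = "foo",
        .irq_get_affinity       = foo_irq_get_affinity,
};
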
||||
|
@ -301,8 +301,8 @@ int bch2_move_extent(struct moving_context *ctxt,
|
||||
io->write_sectors = k.k->size;
|
||||
|
||||
bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
|
||||
bio_set_prio(&io->write.op.wbio.bio,
|
||||
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
|
||||
io->write.op.wbio.bio.bi_ioprio =
|
||||
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
|
||||
|
||||
if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
|
||||
GFP_KERNEL))
|
||||
@ -312,7 +312,7 @@ int bch2_move_extent(struct moving_context *ctxt,
|
||||
io->rbio.opts = io_opts;
|
||||
bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
|
||||
io->rbio.bio.bi_vcnt = pages;
|
||||
bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
|
||||
io->rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
|
||||
io->rbio.bio.bi_iter.bi_size = sectors << 9;
|
||||
|
||||
io->rbio.bio.bi_opf = REQ_OP_READ;
|
||||
|
@ -7,10 +7,12 @@
|
||||
enum bip_flags {
|
||||
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
|
||||
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
|
||||
BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
|
||||
BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
|
||||
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
|
||||
BIP_COPY_USER = 1 << 5, /* Kernel bounce buffer in use */
|
||||
BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */
|
||||
BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */
|
||||
BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */
|
||||
BIP_CHECK_GUARD = 1 << 5, /* guard check */
|
||||
BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
|
||||
BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
|
||||
};
|
||||
|
||||
struct bio_integrity_payload {
|
||||
@ -21,6 +23,7 @@ struct bio_integrity_payload {
|
||||
unsigned short bip_vcnt; /* # of integrity bio_vecs */
|
||||
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
|
||||
unsigned short bip_flags; /* control flags */
|
||||
u16 app_tag; /* application tag value */
|
||||
|
||||
struct bvec_iter bio_iter; /* for rewinding parent bio */
|
||||
|
||||
@ -30,6 +33,9 @@ struct bio_integrity_payload {
|
||||
struct bio_vec bip_inline_vecs[];/* embedded bvec array */
|
||||
};
|
||||
|
||||
#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \
|
||||
BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INTEGRITY
|
||||
|
||||
#define bip_for_each_vec(bvl, bip, iter) \
|
||||
@ -72,7 +78,8 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
|
||||
unsigned int nr);
|
||||
int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
|
||||
unsigned int offset);
|
||||
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len);
|
||||
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
|
||||
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
|
||||
void bio_integrity_unmap_user(struct bio *bio);
|
||||
bool bio_integrity_prep(struct bio *bio);
|
||||
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
|
||||
@ -98,8 +105,12 @@ static inline void bioset_integrity_free(struct bio_set *bs)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
|
||||
ssize_t len)
|
||||
static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
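Illustration only, not from the series: the new BIP_CHECK_GUARD/REFTAG/APPTAG flags and the app_tag field let a submitter ask for per-I/O protection-information checks, which nvme_setup_rw() earlier in this diff turns into PRCHK bits. A minimal sketch assuming the helpers declared in this header; the function name and the header path are assumptions and error handling is trimmed:

#include <linux/bio.h>
#include <linux/bio-integrity.h>        /* the header modified above; path assumed */
#include <linux/err.h>

static int attach_pi(struct bio *bio, struct page *pi_page,
                     unsigned int pi_len, u16 app_tag)
{
        struct bio_integrity_payload *bip;

        /* One bvec is enough for a single pre-generated PI buffer. */
        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        if (bio_integrity_add_page(bio, pi_page, pi_len, 0) != pi_len)
                return -ENOMEM;

        bip->app_tag = app_tag;                 /* new field added above */
        bip->bip_flags |= BIP_CHECK_GUARD |     /* new per-I/O check flags */
                          BIP_CHECK_REFTAG;
        return 0;
}
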
@ -19,9 +19,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
|
||||
return min(nr_segs, BIO_MAX_VECS);
|
||||
}
|
||||
|
||||
#define bio_prio(bio) (bio)->bi_ioprio
|
||||
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
|
||||
|
||||
#define bio_iter_iovec(bio, iter) \
|
||||
bvec_iter_bvec((bio)->bi_io_vec, (iter))
|
||||
|
||||
@ -416,8 +413,6 @@ int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
|
||||
unsigned off);
|
||||
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
|
||||
size_t len, size_t off);
|
||||
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
|
||||
unsigned int, unsigned int);
|
||||
void __bio_add_page(struct bio *bio, struct page *page,
|
||||
unsigned int len, unsigned int off);
|
||||
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
|
||||
|
@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H

struct blk_mq_queue_map;
struct pci_dev;

void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset);

#endif /* _LINUX_BLK_MQ_PCI_H */
@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H

struct blk_mq_queue_map;
struct virtio_device;

void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);

#endif /* _LINUX_BLK_MQ_VIRTIO_H */
|
@ -296,13 +296,6 @@ enum blk_eh_timer_return {
BLK_EH_RESET_TIMER,
};

/* Keep alloc_policy_name[] in sync with the definitions below */
enum {
BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */
BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */
BLK_TAG_ALLOC_MAX
};

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
@ -668,7 +661,6 @@ struct blk_mq_ops {

/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
/*
 * Set when this device requires underlying blk-mq device for
@ -677,23 +669,20 @@ enum {
BLK_MQ_F_STACKING = 1 << 2,
BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 4,
/* Do not allow an I/O scheduler to be configured. */
BLK_MQ_F_NO_SCHED = 1 << 5,

/*
 * Alloc tags on a round-robin base instead of the first available one.
 */
BLK_MQ_F_TAG_RR = 1 << 5,

/*
 * Select 'none' during queue registration in case of a single hwq
 * or shared hwqs instead of 'mq-deadline'.
 */
BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,

BLK_MQ_F_MAX = 1 << 7,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_MAX_DEPTH (10240)
#define BLK_MQ_NO_HCTX_IDX (-1U)
@ -921,6 +910,8 @@ void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
void blk_freeze_queue_start_non_owner(struct request_queue *q);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
struct device *dev, unsigned int offset);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@ -977,14 +968,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq)
rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
unsigned int nr_segs)
{
rq->nr_phys_segments = nr_segs;
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
struct lock_class_key *key);
|
||||
|
||||
|
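Taken together, the blk-mq.h hunks drop BLK_MQ_F_SHOULD_MERGE and the encoded alloc policy, add BLK_MQ_F_TAG_RR, and expose the bus-generic blk_mq_map_hw_queues(). A sketch, not taken from the series, of what a PCI driver's tag-set setup might look like afterwards; "mydev" and all constants are hypothetical and queue_rq is a placeholder:

#include <linux/blk-mq.h>
#include <linux/pci.h>
#include <linux/numa.h>
#include <linux/string.h>

struct mydev {
        struct pci_dev *pdev;
        struct blk_mq_tag_set tag_set;
};

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        /* Placeholder: a real driver would issue bd->rq to hardware here. */
        return BLK_STS_IOERR;
}

static void mydev_map_queues(struct blk_mq_tag_set *set)
{
        struct mydev *md = set->driver_data;

        /* Derives the mapping from the bus' irq_get_affinity() callback. */
        blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT], &md->pdev->dev, 0);
}

static const struct blk_mq_ops mydev_mq_ops = {
        .queue_rq       = mydev_queue_rq,
        .map_queues     = mydev_map_queues,
};

static int mydev_init_tag_set(struct mydev *md)
{
        struct blk_mq_tag_set *set = &md->tag_set;

        memset(set, 0, sizeof(*set));
        set->ops                = &mydev_mq_ops;
        set->nr_hw_queues       = 4;
        set->queue_depth        = 128;
        set->numa_node          = NUMA_NO_NODE;
        /* Round-robin tag allocation is now a plain flag; merging no longer has one. */
        set->flags              = BLK_MQ_F_TAG_RR;
        set->driver_data        = md;
        return blk_mq_alloc_tag_set(set);
}
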
@ -581,6 +581,12 @@ struct request_queue {
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
struct task_struct *mq_freeze_owner;
|
||||
int mq_freeze_owner_depth;
|
||||
/*
|
||||
* Records disk & queue state in current context, used in unfreeze
|
||||
* queue
|
||||
*/
|
||||
bool mq_freeze_disk_dead;
|
||||
bool mq_freeze_queue_dying;
|
||||
#endif
|
||||
wait_queue_head_t mq_freeze_wq;
|
||||
/*
|
||||
|
@ -286,12 +286,7 @@ static inline void *bvec_virt(struct bio_vec *bvec)
|
||||
*/
|
||||
static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
|
||||
{
|
||||
/*
|
||||
* Note this open codes page_to_phys because page_to_phys is defined in
|
||||
* <asm/io.h>, which we don't want to pull in here. If it ever moves to
|
||||
* a sensible place we should start using it.
|
||||
*/
|
||||
return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset;
|
||||
return page_to_phys(bvec->bv_page) + bvec->bv_offset;
|
||||
}
|
||||
|
||||
#endif /* __LINUX_BVEC_H */
|
||||
|
@ -48,6 +48,7 @@ struct fwnode_handle;
|
||||
* will never get called until they do.
|
||||
* @remove: Called when a device removed from this bus.
|
||||
* @shutdown: Called at shut-down time to quiesce the device.
|
||||
* @irq_get_affinity: Get IRQ affinity mask for the device on this bus.
|
||||
*
|
||||
* @online: Called to put the device back online (after offlining it).
|
||||
* @offline: Called to put the device offline for hot-removal. May fail.
|
||||
@ -87,6 +88,8 @@ struct bus_type {
|
||||
void (*sync_state)(struct device *dev);
|
||||
void (*remove)(struct device *dev);
|
||||
void (*shutdown)(struct device *dev);
|
||||
const struct cpumask *(*irq_get_affinity)(struct device *dev,
|
||||
unsigned int irq_vec);
|
||||
|
||||
int (*online)(struct device *dev);
|
||||
int (*offline)(struct device *dev);
|
||||
|
@ -382,6 +382,7 @@ struct readahead_control;
|
||||
#define IOCB_DIO_CALLER_COMP (1 << 22)
|
||||
/* kiocb is a read or write operation submitted by fs/aio.c. */
|
||||
#define IOCB_AIO_RW (1 << 23)
|
||||
#define IOCB_HAS_METADATA (1 << 24)
|
||||
|
||||
/* for use in trace events */
|
||||
#define TRACE_IOCB_STRINGS \
|
||||
|
@ -78,8 +78,9 @@ struct io_hash_table {
|
||||
|
||||
struct io_mapped_region {
|
||||
struct page **pages;
|
||||
void *vmap_ptr;
|
||||
size_t nr_pages;
|
||||
void *ptr;
|
||||
unsigned nr_pages;
|
||||
unsigned flags;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -293,6 +294,11 @@ struct io_ring_ctx {
|
||||
|
||||
struct io_submit_state submit_state;
|
||||
|
||||
/*
|
||||
* Modifications are protected by ->uring_lock and ->mmap_lock.
|
||||
* The flags, buf_pages and buf_nr_pages fields should be stable
|
||||
* once published.
|
||||
*/
|
||||
struct xarray io_bl_xa;
|
||||
|
||||
struct io_hash_table cancel_table;
|
||||
@ -424,17 +430,10 @@ struct io_ring_ctx {
|
||||
* side will need to grab this lock, to prevent either side from
|
||||
* being run concurrently with the other.
|
||||
*/
|
||||
struct mutex resize_lock;
|
||||
|
||||
/*
|
||||
* If IORING_SETUP_NO_MMAP is used, then the below holds
|
||||
* the gup'ed pages for the two rings, and the sqes.
|
||||
*/
|
||||
unsigned short n_ring_pages;
|
||||
unsigned short n_sqe_pages;
|
||||
struct page **ring_pages;
|
||||
struct page **sqe_pages;
|
||||
struct mutex mmap_lock;
|
||||
|
||||
struct io_mapped_region sq_region;
|
||||
struct io_mapped_region ring_region;
|
||||
/* used for optimised request parameter and wait argument passing */
|
||||
struct io_mapped_region param_region;
|
||||
};
|
||||
@ -481,6 +480,7 @@ enum {
|
||||
REQ_F_BL_NO_RECYCLE_BIT,
|
||||
REQ_F_BUFFERS_COMMIT_BIT,
|
||||
REQ_F_BUF_NODE_BIT,
|
||||
REQ_F_HAS_METADATA_BIT,
|
||||
|
||||
/* not a real bit, just to check we're not overflowing the space */
|
||||
__REQ_F_LAST_BIT,
|
||||
@ -561,6 +561,8 @@ enum {
|
||||
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
|
||||
/* buf node is valid */
|
||||
REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
|
||||
/* request has read/write metadata assigned */
|
||||
REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
|
||||
};
|
||||
|
||||
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
|
||||
|
Some files were not shown because too many files have changed in this diff.