Merge tag 'for-6.9/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

- Fix DM core's IO submission (which includes dm-io and dm-bufio) so
  that a bio's IO priority is propagated. Work focused on enabling both
  the DM crypt and verity targets to retain the appropriate IO priority.

- Fix DM raid reshape logic to not allow an empty flush bio to be
  requeued due to false concern about the bio, which doesn't have a
  data payload, accessing beyond the end of the device.

- Fix DM core's internal resume so that it properly calls both the
  preresume and resume methods, which fixes the potential for a
  postsuspend and resume imbalance.

- Update the DM verity target to set the DM_TARGET_SINGLETON flag
  because it doesn't make sense to have a DM table with a mix of
  targets that include dm-verity.

- Small cleanups in the DM crypt, thin, and integrity targets.

- Fix references to the dm-devel mailing list to use the latest list
  address.

* tag 'for-6.9/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: call the resume method on internal suspend
  dm raid: fix false positive for requeue needed during reshape
  dm-integrity: set max_integrity_segments in dm_integrity_io_hints
  dm: update relevant MODULE_AUTHOR entries to latest dm-devel mailing list
  dm ioctl: update DM_DRIVER_EMAIL to new dm-devel mailing list
  dm verity: set DM_TARGET_SINGLETON feature flag
  dm crypt: Fix IO priority lost when queuing write bios
  dm verity: Fix IO priority lost when reading FEC and hash
  dm bufio: Support IO priority
  dm io: Support IO priority
  dm crypt: remove redundant state settings after waking up
  dm thin: add braces around conditional code that spans lines
commit d2bac0823d
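The heart of the series is a new trailing ioprio argument on dm_io(); every in-tree caller now supplies one, passing IOPRIO_DEFAULT where no better priority is known. As a minimal sketch of a synchronous caller (example_read_block() is a hypothetical target function; the dm_io request/region structures and bio_prio() are the real API):

/*
 * Sketch only: a synchronous read through the updated interface,
 * forwarding the priority of the bio being served.
 */
static int example_read_block(struct dm_io_client *client,
			      struct block_device *bdev, sector_t sector,
			      void *data, struct bio *parent_bio)
{
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = sector,
		.count = 8,			/* 4 KiB in 512-byte sectors */
	};
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = data,
		.notify.fn = NULL,		/* NULL notify.fn => synchronous */
		.client = client,
	};

	/* New in this series: the trailing I/O priority argument. */
	return dm_io(&io_req, 1, &where, NULL, bio_prio(parent_bio));
}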
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -489,5 +489,5 @@ module_init(dm_bio_prison_init);
 module_exit(dm_bio_prison_exit);
 
 MODULE_DESCRIPTION(DM_NAME " bio prison");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1292,7 +1292,8 @@ static void dmio_complete(unsigned long error, void *context)
 }
 
 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
-		     unsigned int n_sectors, unsigned int offset)
+		     unsigned int n_sectors, unsigned int offset,
+		     unsigned short ioprio)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -1315,7 +1316,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
 		io_req.mem.ptr.vma = (char *)b->data + offset;
 	}
 
-	r = dm_io(&io_req, 1, &region, NULL);
+	r = dm_io(&io_req, 1, &region, NULL, ioprio);
 	if (unlikely(r))
 		b->end_io(b, errno_to_blk_status(r));
 }
@@ -1331,7 +1332,8 @@ static void bio_complete(struct bio *bio)
 }
 
 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
-		    unsigned int n_sectors, unsigned int offset)
+		    unsigned int n_sectors, unsigned int offset,
+		    unsigned short ioprio)
 {
 	struct bio *bio;
 	char *ptr;
@@ -1339,13 +1341,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
 
 	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
 	if (!bio) {
-		use_dmio(b, op, sector, n_sectors, offset);
+		use_dmio(b, op, sector, n_sectors, offset, ioprio);
 		return;
 	}
 	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = bio_complete;
 	bio->bi_private = b;
+	bio->bi_ioprio = ioprio;
 
 	ptr = (char *)b->data + offset;
 	len = n_sectors << SECTOR_SHIFT;
@@ -1368,7 +1371,7 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
 	return sector;
 }
 
-static void submit_io(struct dm_buffer *b, enum req_op op,
+static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
 		      void (*end_io)(struct dm_buffer *, blk_status_t))
 {
 	unsigned int n_sectors;
@@ -1398,9 +1401,9 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
 	}
 
 	if (b->data_mode != DATA_MODE_VMALLOC)
-		use_bio(b, op, sector, n_sectors, offset);
+		use_bio(b, op, sector, n_sectors, offset, ioprio);
 	else
-		use_dmio(b, op, sector, n_sectors, offset);
+		use_dmio(b, op, sector, n_sectors, offset, ioprio);
 }
 
 /*
@@ -1456,7 +1459,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
 	b->write_end = b->dirty_end;
 
 	if (!write_list)
-		submit_io(b, REQ_OP_WRITE, write_endio);
+		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
 	else
 		list_add_tail(&b->write_list, write_list);
 }
@@ -1470,7 +1473,7 @@ static void __flush_write_list(struct list_head *write_list)
 		struct dm_buffer *b =
 			list_entry(write_list->next, struct dm_buffer, write_list);
 		list_del(&b->write_list);
-		submit_io(b, REQ_OP_WRITE, write_endio);
+		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
 		cond_resched();
 	}
 	blk_finish_plug(&plug);
@@ -1852,7 +1855,8 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
  * and uses dm_bufio_mark_buffer_dirty to write new data back).
  */
 static void *new_read(struct dm_bufio_client *c, sector_t block,
-		      enum new_flag nf, struct dm_buffer **bp)
+		      enum new_flag nf, struct dm_buffer **bp,
+		      unsigned short ioprio)
 {
 	int need_submit = 0;
 	struct dm_buffer *b;
@@ -1905,7 +1909,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 		return NULL;
 
 	if (need_submit)
-		submit_io(b, REQ_OP_READ, read_endio);
+		submit_io(b, REQ_OP_READ, ioprio, read_endio);
 
 	if (nf != NF_GET)	/* we already tested this condition above */
 		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1926,32 +1930,46 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
 		   struct dm_buffer **bp)
 {
-	return new_read(c, block, NF_GET, bp);
+	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_get);
 
-void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
-		    struct dm_buffer **bp)
+static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+			     struct dm_buffer **bp, unsigned short ioprio)
 {
 	if (WARN_ON_ONCE(dm_bufio_in_request()))
 		return ERR_PTR(-EINVAL);
 
-	return new_read(c, block, NF_READ, bp);
+	return new_read(c, block, NF_READ, bp, ioprio);
+}
+
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+		    struct dm_buffer **bp)
+{
+	return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_read);
 
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+				struct dm_buffer **bp, unsigned short ioprio)
+{
+	return __dm_bufio_read(c, block, bp, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
+
 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
 		   struct dm_buffer **bp)
 {
	if (WARN_ON_ONCE(dm_bufio_in_request()))
 		return ERR_PTR(-EINVAL);
 
-	return new_read(c, block, NF_FRESH, bp);
+	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_new);
 
-void dm_bufio_prefetch(struct dm_bufio_client *c,
-		       sector_t block, unsigned int n_blocks)
+static void __dm_bufio_prefetch(struct dm_bufio_client *c,
+			sector_t block, unsigned int n_blocks,
+			unsigned short ioprio)
 {
 	struct blk_plug plug;
 
@@ -1987,7 +2005,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 	dm_bufio_unlock(c);
 
 	if (need_submit)
-		submit_io(b, REQ_OP_READ, read_endio);
+		submit_io(b, REQ_OP_READ, ioprio, read_endio);
 	dm_bufio_release(b);
 
 	cond_resched();
@@ -2002,8 +2020,20 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 flush_plug:
 	blk_finish_plug(&plug);
 }
+
+void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
+{
+	return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
+}
 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
 
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
+				unsigned int n_blocks, unsigned short ioprio)
+{
+	return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
+
 void dm_bufio_release(struct dm_buffer *b)
 {
 	struct dm_bufio_client *c = b->c;
@@ -2167,7 +2197,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 	if (WARN_ON_ONCE(dm_bufio_in_request()))
 		return -EINVAL;
 
-	return dm_io(&io_req, 1, &io_reg, NULL);
+	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
 
@@ -2191,7 +2221,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
 	if (WARN_ON_ONCE(dm_bufio_in_request()))
 		return -EINVAL; /* discards are optional */
 
-	return dm_io(&io_req, 1, &io_reg, NULL);
+	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
 
@@ -2968,6 +2998,6 @@ MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
 
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
 MODULE_LICENSE("GPL");
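For callers that want to forward a specific priority, dm-bufio now exports dm_bufio_read_with_ioprio() and dm_bufio_prefetch_with_ioprio(); the existing dm_bufio_read()/dm_bufio_prefetch() keep their signatures and pass IOPRIO_DEFAULT. A minimal usage sketch, with the hypothetical helper my_read_metadata() standing in for a real target:

/*
 * Sketch only: read one metadata block, inheriting the priority of
 * the bio being served; my_read_metadata() is illustrative.
 */
static int my_read_metadata(struct dm_bufio_client *c, sector_t block,
			    struct bio *parent_bio)
{
	struct dm_buffer *buf;
	void *data;

	data = dm_bufio_read_with_ioprio(c, block, &buf, bio_prio(parent_bio));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... use the block's contents ... */

	dm_bufio_release(buf);
	return 0;
}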
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1947,7 +1947,7 @@ static void __exit smq_exit(void)
 module_init(smq_init);
 module_exit(smq_exit);
 
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("smq cache policy");
 
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1688,6 +1688,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 				 GFP_NOIO, &cc->bs);
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
+	clone->bi_ioprio = io->base_bio->bi_ioprio;
 
 	remaining_size = size;
 
@@ -1964,7 +1965,6 @@ static int dmcrypt_write(void *data)
 
 		schedule();
 
-		set_current_state(TASK_RUNNING);
 		spin_lock_irq(&cc->write_thread_lock);
 		goto continue_locked;
 
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -573,5 +573,5 @@ static struct target_type dust_target = {
 module_dm(dust);
 
 MODULE_DESCRIPTION(DM_NAME " dust test target");
-MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -454,6 +454,6 @@ static struct target_type ebs_target = {
 };
 module_dm(ebs);
 
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
 MODULE_DESCRIPTION(DM_NAME " emulated block size target");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -690,5 +690,5 @@ static struct target_type flakey_target = {
 module_dm(flakey);
 
 MODULE_DESCRIPTION(DM_NAME " flakey target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -555,7 +555,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
 		}
 	}
 
-	r = dm_io(&io_req, 1, &io_loc, NULL);
+	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r))
 		return r;
 
@@ -1073,7 +1073,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
 	io_loc.sector = ic->start + SB_SECTORS + sector;
 	io_loc.count = n_sectors;
 
-	r = dm_io(&io_req, 1, &io_loc, NULL);
+	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r)) {
 		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
 				      "reading journal" : "writing journal", r);
@@ -1190,7 +1190,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
 		io_loc.sector = target;
 		io_loc.count = n_sectors;
 
-		r = dm_io(&io_req, 1, &io_loc, NULL);
+		r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 		if (unlikely(r)) {
 			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
 			fn(-1UL, data);
@@ -1519,7 +1519,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
 		fr.io_reg.count = 0,
 		fr.ic = ic;
 		init_completion(&fr.comp);
-		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
 		BUG_ON(r);
 	}
 
@@ -1727,7 +1727,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
 		io_loc.sector = sector;
 		io_loc.count = ic->sectors_per_block;
 
-		r = dm_io(&io_req, 1, &io_loc, NULL);
+		r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 		if (unlikely(r)) {
 			dio->bi_status = errno_to_blk_status(r);
 			goto free_ret;
@@ -2806,7 +2806,7 @@ static void integrity_recalc(struct work_struct *w)
 	io_loc.sector = get_data_sector(ic, area, offset);
 	io_loc.count = n_sectors;
 
-	r = dm_io(&io_req, 1, &io_loc, NULL);
+	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r)) {
 		dm_integrity_io_error(ic, "reading data", r);
 		goto err;
@@ -3485,6 +3485,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
 		limits->dma_alignment = limits->logical_block_size - 1;
 	}
+	limits->max_integrity_segments = USHRT_MAX;
 }
 
 static void calculate_journal_section_size(struct dm_integrity_c *ic)
@@ -3652,7 +3653,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
 	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
 
 	blk_integrity_register(disk, &bi);
-	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
 static void dm_integrity_free_page_list(struct page_list *pl)
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -305,7 +305,7 @@ static void km_dp_init(struct dpages *dp, void *data)
  */
 static void do_region(const blk_opf_t opf, unsigned int region,
 		      struct dm_io_region *where, struct dpages *dp,
-		      struct io *io)
+		      struct io *io, unsigned short ioprio)
 {
 	struct bio *bio;
 	struct page *page;
@@ -354,6 +354,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
 					       &io->client->bios);
 		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 		bio->bi_end_io = endio;
+		bio->bi_ioprio = ioprio;
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
@@ -383,7 +384,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
 
 static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
 			struct dm_io_region *where, struct dpages *dp,
-			struct io *io, int sync)
+			struct io *io, int sync, unsigned short ioprio)
 {
 	int i;
 	struct dpages old_pages = *dp;
@@ -400,7 +401,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
 		if (where[i].count || (opf & REQ_PREFLUSH))
-			do_region(opf, i, where + i, dp, io);
+			do_region(opf, i, where + i, dp, io, ioprio);
 	}
 
 	/*
@@ -425,7 +426,7 @@ static void sync_io_complete(unsigned long error, void *context)
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
-		   unsigned long *error_bits)
+		   unsigned long *error_bits, unsigned short ioprio)
 {
 	struct io *io;
 	struct sync_io sio;
@@ -447,7 +448,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 
-	dispatch_io(opf, num_regions, where, dp, io, 1);
+	dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
 
 	wait_for_completion_io(&sio.wait);
 
@@ -459,7 +460,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
 		    struct dm_io_region *where, blk_opf_t opf,
-		    struct dpages *dp, io_notify_fn fn, void *context)
+		    struct dpages *dp, io_notify_fn fn, void *context,
+		    unsigned short ioprio)
 {
 	struct io *io;
 
@@ -479,7 +481,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 
-	dispatch_io(opf, num_regions, where, dp, io, 0);
+	dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
 	return 0;
 }
 
@@ -521,7 +523,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 }
 
 int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
-	  struct dm_io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits,
+	  unsigned short ioprio)
 {
 	int r;
 	struct dpages dp;
@@ -532,11 +535,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
 
 	if (!io_req->notify.fn)
 		return sync_io(io_req->client, num_regions, where,
-			       io_req->bi_opf, &dp, sync_error_bits);
+			       io_req->bi_opf, &dp, sync_error_bits, ioprio);
 
 	return async_io(io_req->client, num_regions, where,
 			io_req->bi_opf, &dp, io_req->notify.fn,
-			io_req->notify.context);
+			io_req->notify.context, ioprio);
 }
 EXPORT_SYMBOL(dm_io);
 
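The same ioprio parameter flows through the asynchronous path: a non-NULL notify.fn routes dm_io() into async_io(), and dispatch_io() stamps the priority onto every bio it builds. A sketch of an async caller under those assumptions (my_io_done() and my_write_regions() are illustrative names, not part of the API):

/*
 * Sketch only: asynchronous dm_io() usage with an explicit priority.
 */
static void my_io_done(unsigned long error_bits, void *context)
{
	/* One bit is set in error_bits per failed region. */
}

static int my_write_regions(struct dm_io_client *client,
			    struct dm_io_region *regions, unsigned int count,
			    struct page_list *pl, unsigned short ioprio)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.offset = 0,
		.mem.ptr.pl = pl,
		.notify.fn = my_io_done,	/* non-NULL => asynchronous */
		.notify.context = NULL,
		.client = client,
	};

	/* The priority is applied to every bio built for these regions. */
	return dm_io(&io_req, count, regions, NULL, ioprio);
}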
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -25,7 +25,7 @@
 #include <linux/ima.h>
 
 #define DM_MSG_PREFIX "ioctl"
-#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
+#define DM_DRIVER_EMAIL "dm-devel@lists.linux.dev"
 
 struct dm_file {
 	/*
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -578,9 +578,9 @@ static int run_io_job(struct kcopyd_job *job)
 	io_job_start(job->kc->throttle);
 
 	if (job->op == REQ_OP_READ)
-		r = dm_io(&io_req, 1, &job->source, NULL);
+		r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
 	else
-		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+		r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
 
 	return r;
 }
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -926,5 +926,5 @@ module_init(userspace_dirty_log_init);
 module_exit(userspace_dirty_log_exit);
 
 MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
-MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -300,7 +300,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
 {
 	lc->io_req.bi_opf = op;
 
-	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+	return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
 }
 
 static int flush_header(struct log_c *lc)
@@ -313,7 +313,7 @@ static int flush_header(struct log_c *lc)
 
 	lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
-	return dm_io(&lc->io_req, 1, &null_location, NULL);
+	return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
 }
 
 static int read_header(struct log_c *log)
@@ -908,5 +908,5 @@ module_init(dm_dirty_log_init);
 module_exit(dm_dirty_log_exit);
 
 MODULE_DESCRIPTION(DM_NAME " dirty region log");
-MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -2266,5 +2266,5 @@ module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs,
 MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
 
 MODULE_DESCRIPTION(DM_NAME " multipath target");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -240,5 +240,5 @@ module_init(dm_rr_init);
 module_exit(dm_rr_exit);
 
 MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3331,14 +3331,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
 	struct mddev *mddev = &rs->md;
 
 	/*
-	 * If we're reshaping to add disk(s)), ti->len and
+	 * If we're reshaping to add disk(s), ti->len and
 	 * mddev->array_sectors will differ during the process
 	 * (ti->len > mddev->array_sectors), so we have to requeue
 	 * bios with addresses > mddev->array_sectors here or
 	 * there will occur accesses past EOD of the component
 	 * data images thus erroring the raid set.
 	 */
-	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+	if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
 		return DM_MAPIO_REQUEUE;
 
 	if (unlikely(!md_handle_request(mddev, bio)))
@@ -4142,6 +4142,6 @@ MODULE_ALIAS("dm-raid10");
 MODULE_ALIAS("dm-raid4");
 MODULE_ALIAS("dm-raid5");
 MODULE_ALIAS("dm-raid6");
-MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Neil Brown <dm-devel@lists.linux.dev>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
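The raid_map() change keys off bio_has_data(): an empty flush carries no payload, so it cannot touch sectors beyond the shrunken mddev->array_sectors during a reshape, and only bios that actually carry data still need the requeue check. The guard condenses to something like the following hypothetical helper:

/*
 * Sketch only (hypothetical helper): a flush bio has a zero-length
 * payload, so bio_has_data() is false and it is never requeued, while
 * data-carrying bios past end-of-device still are.
 */
static inline bool reshape_needs_requeue(struct bio *bio, sector_t array_sectors)
{
	return bio_has_data(bio) && bio_end_sector(bio) > array_sectors;
}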
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -278,7 +278,7 @@ static int mirror_flush(struct dm_target *ti)
 	}
 
 	error_bits = -1;
-	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+	dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
 	if (unlikely(error_bits != 0)) {
 		for (i = 0; i < ms->nr_mirrors; i++)
 			if (test_bit(i, &error_bits))
@@ -554,7 +554,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 
 	map_region(&io, m, bio);
 	bio_set_m(bio, m);
-	BUG_ON(dm_io(&io_req, 1, &io, NULL));
+	BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
 }
 
 static inline int region_in_sync(struct mirror_set *ms, region_t region,
@@ -681,7 +681,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	 */
 	bio_set_m(bio, get_default_mirror(ms));
 
-	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
+	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
 }
 
 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -723,5 +723,5 @@ void dm_rh_start_recovery(struct dm_region_hash *rh)
 EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
 
 MODULE_DESCRIPTION(DM_NAME " region hash");
-MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -223,7 +223,7 @@ static void do_metadata(struct work_struct *work)
 {
 	struct mdata_req *req = container_of(work, struct mdata_req, work);
 
-	req->result = dm_io(req->io_req, 1, req->where, NULL);
+	req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
 }
 
 /*
@@ -247,7 +247,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
 	struct mdata_req req;
 
 	if (!metadata)
-		return dm_io(&io_req, 1, &where, NULL);
+		return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
 
 	req.where = &where;
 	req.io_req = &io_req;
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -453,12 +453,13 @@ static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bi
 	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
 
 	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
-	if (r)
+	if (r) {
 		/*
 		 * We reused an old cell; we can get rid of
 		 * the new one.
 		 */
 		dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+	}
 
 	return r;
 }
@@ -707,9 +708,10 @@ static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
 		(void) sector_div(e, pool->sectors_per_block);
 	}
 
-	if (e < b)
+	if (e < b) {
 		/* Can happen if the bio is within a single block. */
 		e = b;
+	}
 
 	*begin = b;
 	*end = e;
@@ -721,13 +723,14 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio_set_dev(bio, tc->pool_dev->bdev);
-	if (block_size_is_power_of_two(pool))
+	if (block_size_is_power_of_two(pool)) {
 		bio->bi_iter.bi_sector =
 			(block << pool->sectors_per_block_shift) |
 			(bi_sector & (pool->sectors_per_block - 1));
-	else
+	} else {
 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
 				 sector_div(bi_sector, pool->sectors_per_block);
+	}
 }
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -1401,9 +1404,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	if (pool->pf.zero_new_blocks) {
 		if (io_overwrites_block(pool, bio))
 			remap_and_issue_overwrite(tc, bio, data_block, m);
-		else
+		else {
 			ll_zero(tc, m, data_block * pool->sectors_per_block,
 				(data_block + 1) * pool->sectors_per_block);
+		}
 	} else
 		process_prepared_mapping(m);
 }
@@ -1416,17 +1420,17 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
 	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
 
-	if (virt_block_end <= tc->origin_size)
+	if (virt_block_end <= tc->origin_size) {
 		schedule_copy(tc, virt_block, tc->origin_dev,
 			      virt_block, data_dest, cell, bio,
 			      pool->sectors_per_block);
 
-	else if (virt_block_begin < tc->origin_size)
+	} else if (virt_block_begin < tc->origin_size) {
 		schedule_copy(tc, virt_block, tc->origin_dev,
 			      virt_block, data_dest, cell, bio,
 			      tc->origin_size - virt_block_begin);
 
-	else
+	} else
 		schedule_zero(tc, virt_block, data_dest, cell, bio);
 }
 
@@ -4560,5 +4564,5 @@ module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
 MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
 
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -60,7 +60,8 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
  * to the data block. Caller is responsible for releasing buf.
  */
 static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
-			   unsigned int *offset, struct dm_buffer **buf)
+			   unsigned int *offset, struct dm_buffer **buf,
+			   unsigned short ioprio)
 {
 	u64 position, block, rem;
 	u8 *res;
@@ -69,7 +70,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
 	block = div64_u64_rem(position, v->fec->io_size, &rem);
 	*offset = (unsigned int)rem;
 
-	res = dm_bufio_read(v->fec->bufio, block, buf);
+	res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
 	if (IS_ERR(res)) {
 		DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
 		      v->data_dev->name, (unsigned long long)rsb,
@@ -121,16 +122,17 @@ static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
  * Decode all RS blocks from buffers and copy corrected bytes into fio->output
 * starting from block_offset.
 */
-static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
-			   u64 rsb, int byte_index, unsigned int block_offset,
-			   int neras)
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+			   struct dm_verity_fec_io *fio, u64 rsb, int byte_index,
+			   unsigned int block_offset, int neras)
 {
 	int r, corrected = 0, res;
 	struct dm_buffer *buf;
 	unsigned int n, i, offset;
 	u8 *par, *block;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
-	par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+	par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
 	if (IS_ERR(par))
 		return PTR_ERR(par);
 
@@ -158,7 +160,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 		if (offset >= v->fec->io_size) {
 			dm_bufio_release(buf);
 
-			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+			par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
 			if (IS_ERR(par))
 				return PTR_ERR(par);
 		}
@@ -210,6 +212,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
 	u8 *bbuf, *rs_block;
 	u8 want_digest[HASH_MAX_DIGESTSIZE];
 	unsigned int n, k;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	if (neras)
 		*neras = 0;
@@ -248,7 +251,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
 			bufio = v->bufio;
 		}
 
-		bbuf = dm_bufio_read(bufio, block, &buf);
+		bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
 		if (IS_ERR(bbuf)) {
 			DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
 				     v->data_dev->name,
@@ -377,7 +380,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
 		if (unlikely(r < 0))
 			return r;
 
-		r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+		r = fec_decode_bufs(v, io, fio, rsb, r, pos, neras);
 		if (r < 0)
 			return r;
 
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -51,6 +51,7 @@ static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
 struct dm_verity_prefetch_work {
 	struct work_struct work;
 	struct dm_verity *v;
+	unsigned short ioprio;
 	sector_t block;
 	unsigned int n_blocks;
 };
@@ -294,6 +295,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 	int r;
 	sector_t hash_block;
 	unsigned int offset;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	verity_hash_at_level(v, block, level, &hash_block, &offset);
 
@@ -307,8 +309,10 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 			 */
 			return -EAGAIN;
 		}
-	} else
-		data = dm_bufio_read(v->bufio, hash_block, &buf);
+	} else {
+		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
+						&buf, bio_prio(bio));
+	}
 
 	if (IS_ERR(data))
 		return PTR_ERR(data);
@@ -511,7 +515,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
 	io_loc.bdev = v->data_dev->bdev;
 	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
 	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
-	r = dm_io(&io_req, 1, &io_loc, NULL);
+	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r))
 		goto free_ret;
 
@@ -752,14 +756,16 @@ static void verity_prefetch_io(struct work_struct *work)
 			hash_block_end = v->hash_blocks - 1;
 		}
 no_prefetch_cluster:
-		dm_bufio_prefetch(v->bufio, hash_block_start,
-				  hash_block_end - hash_block_start + 1);
+		dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
+					hash_block_end - hash_block_start + 1,
+					pw->ioprio);
 	}
 
 	kfree(pw);
 }
 
-static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
+				   unsigned short ioprio)
 {
 	sector_t block = io->block;
 	unsigned int n_blocks = io->n_blocks;
@@ -787,6 +793,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
 	pw->v = v;
 	pw->block = block;
 	pw->n_blocks = n_blocks;
+	pw->ioprio = ioprio;
 	queue_work(v->verify_wq, &pw->work);
 }
 
@@ -829,7 +836,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 
 	verity_fec_init_io(io);
 
-	verity_submit_prefetch(v, io);
+	verity_submit_prefetch(v, io, bio_prio(bio));
 
 	submit_bio_noacct(bio);
 
@@ -1559,7 +1566,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
 
 static struct target_type verity_target = {
 	.name		= "verity",
-	.features	= DM_TARGET_IMMUTABLE,
+	.features	= DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
 	.version	= {1, 10, 0},
 	.module		= THIS_MODULE,
 	.ctr		= verity_ctr,
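Because verity's prefetch runs from a workqueue, with no submitting bio in scope, the priority has to be captured at queue time; that is why dm_verity_prefetch_work grows an ioprio field. The general pattern, sketched with illustrative names (my_prefetch_work, my_prefetch_fn, my_submit_prefetch are hypothetical):

/*
 * Sketch only: capture the submitter's priority when deferring work,
 * then use it for the reads issued from the workqueue.
 */
struct my_prefetch_work {
	struct work_struct work;
	struct dm_bufio_client *bufio;
	unsigned short ioprio;		/* captured via bio_prio() at submit */
	sector_t block;
	unsigned int n_blocks;
};

static void my_prefetch_fn(struct work_struct *w)
{
	struct my_prefetch_work *pw =
		container_of(w, struct my_prefetch_work, work);

	dm_bufio_prefetch_with_ioprio(pw->bufio, pw->block,
				      pw->n_blocks, pw->ioprio);
	kfree(pw);
}

static void my_submit_prefetch(struct workqueue_struct *wq,
			       struct dm_bufio_client *bufio,
			       struct bio *bio, sector_t block,
			       unsigned int n_blocks)
{
	struct my_prefetch_work *pw = kmalloc(sizeof(*pw), GFP_NOIO);

	if (!pw)
		return;		/* prefetch is best-effort; skip on ENOMEM */
	INIT_WORK(&pw->work, my_prefetch_fn);
	pw->bufio = bufio;
	pw->ioprio = bio_prio(bio);
	pw->block = block;
	pw->n_blocks = n_blocks;
	queue_work(wq, &pw->work);
}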
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 			req.notify.context = &endio;
 
 			/* writing via async dm-io (implied by notify.fn above) won't return an error */
-			(void) dm_io(&req, 1, &region, NULL);
+			(void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
 			i = j;
 		}
 
@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
 	req.notify.fn = NULL;
 	req.notify.context = NULL;
 
-	r = dm_io(&req, 1, &region, NULL);
+	r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r))
 		writecache_error(wc, r, "error writing superblock");
 }
@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
 	req.client = wc->dm_io;
 	req.notify.fn = NULL;
 
-	r = dm_io(&req, 1, &region, NULL);
+	r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
 	if (unlikely(r))
 		writecache_error(wc, r, "error flushing metadata: %d", r);
 }
@@ -990,7 +990,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
 	req.client = wc->dm_io;
 	req.notify.fn = NULL;
 
-	return dm_io(&req, 1, &region, NULL);
+	return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
 }
 
 static void writecache_resume(struct dm_target *ti)
@@ -2776,5 +2776,5 @@ static struct target_type writecache_target = {
 module_dm(writecache);
 
 MODULE_DESCRIPTION(DM_NAME " writecache target");
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2948,6 +2948,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
 
 static void __dm_internal_resume(struct mapped_device *md)
 {
+	int r;
+	struct dm_table *map;
+
 	BUG_ON(!md->internal_suspend_count);
 
 	if (--md->internal_suspend_count)
@@ -2956,12 +2959,23 @@ static void __dm_internal_resume(struct mapped_device *md)
 	if (dm_suspended_md(md))
 		goto done; /* resume from nested suspend */
 
-	/*
-	 * NOTE: existing callers don't need to call dm_table_resume_targets
-	 * (which may fail -- so best to avoid it for now by passing NULL map)
-	 */
-	(void) __dm_resume(md, NULL);
-
+	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+	r = __dm_resume(md, map);
+	if (r) {
+		/*
+		 * If a preresume method of some target failed, we are in a
+		 * tricky situation. We can't return an error to the caller. We
+		 * can't fake success because then the "resume" and
+		 * "postsuspend" methods would not be paired correctly, and it
+		 * would break various targets, for example it would cause list
+		 * corruption in the "origin" target.
+		 *
+		 * So, we fake normal suspend here, to make sure that the
+		 * "resume" and "postsuspend" methods will be paired correctly.
+		 */
+		DMERR("Preresume method failed: %d", r);
+		set_bit(DMF_SUSPENDED, &md->flags);
+	}
 done:
 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
 	smp_mb__after_atomic();
@@ -3515,5 +3529,5 @@ module_param(swap_bios, int, 0644);
 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
 
 MODULE_DESCRIPTION(DM_NAME " driver");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -656,7 +656,7 @@ EXPORT_SYMBOL_GPL(dm_bm_checksum);
 /*----------------------------------------------------------------*/
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
 MODULE_DESCRIPTION("Immutable metadata library for dm");
 
 /*----------------------------------------------------------------*/
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -64,6 +64,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
 		    struct dm_buffer **bp);
 
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+				struct dm_buffer **bp, unsigned short ioprio);
+
 /*
  * Like dm_bufio_read, but return buffer from cache, don't read
  * it. If the buffer is not in the cache, return NULL.
@@ -86,6 +89,10 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
 void dm_bufio_prefetch(struct dm_bufio_client *c,
 		       sector_t block, unsigned int n_blocks);
 
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c,
+				sector_t block, unsigned int n_blocks,
+				unsigned short ioprio);
+
 /*
  * Release a reference obtained with dm_bufio_{read,get,new}. The data
  * pointer and dm_buffer pointer is no longer valid after this call.
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -80,7 +80,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
 * error occurred doing io to the corresponding region.
 */
 int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
-	  struct dm_io_region *region, unsigned long *sync_error_bits);
+	  struct dm_io_region *region, unsigned long *sync_error_bits,
+	  unsigned short ioprio);
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_DM_IO_H */