Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-16 05:26:07 +00:00)
btrfs: zoned: handle REQ_OP_ZONE_APPEND as writing
Zoned filesystems use REQ_OP_ZONE_APPEND bios for writing to actual devices. Let btrfs_end_bio() and btrfs_op() be aware of it, by mapping REQ_OP_ZONE_APPEND to BTRFS_MAP_WRITE and using btrfs_op() instead of bio_op().

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
e1326f0339
commit
cfe94440d1
@@ -709,7 +709,7 @@ static void end_workqueue_bio(struct bio *bio)
|
||||
fs_info = end_io_wq->info;
|
||||
end_io_wq->status = bio->bi_status;
|
||||
|
||||
if (bio_op(bio) == REQ_OP_WRITE) {
|
||||
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
|
||||
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
|
||||
wq = fs_info->endio_meta_write_workers;
|
||||
else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
|
||||
@@ -885,7 +885,7 @@ blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
|
||||
int async = check_async_write(fs_info, BTRFS_I(inode));
|
||||
blk_status_t ret;
|
||||
|
||||
if (bio_op(bio) != REQ_OP_WRITE) {
|
||||
if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
|
||||
/*
|
||||
* called for a read, do the setup so that checksum validation
|
||||
* can happen in the async kernel threads
|
||||
|
@@ -2250,7 +2250,7 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
|
||||
if (btrfs_is_free_space_inode(BTRFS_I(inode)))
|
||||
metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
|
||||
|
||||
if (bio_op(bio) != REQ_OP_WRITE) {
|
||||
if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
|
||||
ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
|
||||
if (ret)
|
||||
goto out;
|
||||
@@ -7681,7 +7681,7 @@ static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
|
||||
if (!refcount_dec_and_test(&dip->refs))
|
||||
return;
|
||||
|
||||
if (bio_op(dip->dio_bio) == REQ_OP_WRITE) {
|
||||
if (btrfs_op(dip->dio_bio) == BTRFS_MAP_WRITE) {
|
||||
__endio_write_update_ordered(BTRFS_I(dip->inode),
|
||||
dip->logical_offset,
|
||||
dip->bytes,
|
||||
@@ -7847,7 +7847,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_dio_private *dip = bio->bi_private;
|
||||
bool write = bio_op(bio) == REQ_OP_WRITE;
|
||||
bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
|
||||
blk_status_t ret;
|
||||
|
||||
/* Check btrfs_submit_bio_hook() for rules about async submit. */
|
||||
@@ -7897,7 +7897,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
|
||||
struct inode *inode,
|
||||
loff_t file_offset)
|
||||
{
|
||||
const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
|
||||
const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
|
||||
const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
|
||||
size_t dip_size;
|
||||
struct btrfs_dio_private *dip;
|
||||
@@ -7927,7 +7927,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
|
||||
static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
|
||||
struct bio *dio_bio, loff_t file_offset)
|
||||
{
|
||||
const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
|
||||
const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
|
||||
BTRFS_BLOCK_GROUP_RAID56_MASK);
|
||||
|
@@ -6448,7 +6448,7 @@ static void btrfs_end_bio(struct bio *bio)
|
||||
struct btrfs_device *dev = btrfs_io_bio(bio)->device;
|
||||
|
||||
ASSERT(dev->bdev);
|
||||
if (bio_op(bio) == REQ_OP_WRITE)
|
||||
if (btrfs_op(bio) == BTRFS_MAP_WRITE)
|
||||
btrfs_dev_stat_inc_and_print(dev,
|
||||
BTRFS_DEV_STAT_WRITE_ERRS);
|
||||
else if (!(bio->bi_opf & REQ_RAHEAD))
|
||||
@@ -6561,10 +6561,10 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
|
||||
atomic_set(&bbio->stripes_pending, bbio->num_stripes);
|
||||
|
||||
if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
|
||||
((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
|
||||
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
|
||||
/* In this case, map_length has been set to the length of
|
||||
a single stripe; not the whole write */
|
||||
if (bio_op(bio) == REQ_OP_WRITE) {
|
||||
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
|
||||
ret = raid56_parity_write(fs_info, bio, bbio,
|
||||
map_length);
|
||||
} else {
|
||||
@@ -6587,7 +6587,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
|
||||
dev = bbio->stripes[dev_nr].dev;
|
||||
if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
|
||||
&dev->dev_state) ||
|
||||
(bio_op(first_bio) == REQ_OP_WRITE &&
|
||||
(btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
|
||||
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
|
||||
bbio_error(bbio, first_bio, logical);
|
||||
continue;
|
||||
|
@@ -424,6 +424,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
|
||||
case REQ_OP_DISCARD:
|
||||
return BTRFS_MAP_DISCARD;
|
||||
case REQ_OP_WRITE:
|
||||
case REQ_OP_ZONE_APPEND:
|
||||
return BTRFS_MAP_WRITE;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
|
Loading…
x
Reference in New Issue
Block a user