mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-01 02:33:57 +00:00
btrfs: correct typos in multiple comments across various files
Fix some confusing spelling errors that were recently identified; the
details are as follows:

    block-group.c: 2800:   uncompressible ==> incompressible
    extent-tree.c: 3131:   EXTEMT         ==> EXTENT
    extent_io.c:   3124:   utlizing       ==> utilizing
    extent_map.c:  1323:   ealier         ==> earlier
    extent_map.c:  1325:   possiblity     ==> possibility
    fiemap.c:      189:    emmitted       ==> emitted
    fiemap.c:      197:    emmitted       ==> emitted
    fiemap.c:      203:    emmitted       ==> emitted
    transaction.h: 36:     trasaction     ==> transaction
    volumes.c:     5312:   filesysmte     ==> filesystem
    zoned.c:       1977:   trasnsaction   ==> transaction

Signed-off-by: Shen Lichuan <shenlichuan@vivo.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent fa984c9e62
commit 2144e1f23f
fs/btrfs/block-group.c
@@ -2797,7 +2797,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 	 * uncompressed data size, because the compression is only done
 	 * when writeback triggered and we don't know how much space we
 	 * are actually going to need, so we reserve the uncompressed
-	 * size because the data may be uncompressible in the worst case.
+	 * size because the data may be incompressible in the worst case.
 	 */
 	if (ret == 0) {
 		bool used;
fs/btrfs/dev-replace.c
@@ -45,7 +45,7 @@
  *
  * - Copy existing extents
  *
- * This happens by re-using scrub facility, as scrub also iterates through
+ * This happens by reusing scrub facility, as scrub also iterates through
  * existing extents from commit root.
  *
  * Location: scrub_write_block_to_dev_replace() from
fs/btrfs/extent-tree.c
@@ -3144,7 +3144,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				break;
 			}
 
-			/* Quick path didn't find the EXTEMT/METADATA_ITEM */
+			/* Quick path didn't find the EXTENT/METADATA_ITEM */
 			if (path->slots[0] - extent_slot > 5)
 				break;
 			extent_slot--;
fs/btrfs/extent_io.c
@@ -3186,7 +3186,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	}
 	/*
 	 * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
-	 * so it can be cleaned up without utlizing page->mapping.
+	 * so it can be cleaned up without utilizing page->mapping.
 	 */
 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
fs/btrfs/extent_map.c
@@ -1326,9 +1326,9 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
 	 * not possible to know which task made more progress because we can
 	 * cycle back to the first root and first inode if it's not the first
 	 * time the shrinker ran, see the above logic. Also a task that started
-	 * later may finish ealier than another task and made less progress. So
+	 * later may finish earlier than another task and made less progress. So
 	 * make this simple and update to the progress of the last task that
-	 * finished, with the occasional possiblity of having two consecutive
+	 * finished, with the occasional possibility of having two consecutive
 	 * runs of the shrinker process the same inodes.
 	 */
 	spin_lock(&fs_info->extent_map_shrinker_lock);
fs/btrfs/fiemap.c
@@ -186,7 +186,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 			 * we have in the cache is the last delalloc range we
 			 * found while the file extent item we found can be
 			 * either for a whole delalloc range we previously
-			 * emmitted or only a part of that range.
+			 * emitted or only a part of that range.
 			 *
 			 * We have two cases here:
 			 *
@@ -194,13 +194,13 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 			 * cached extent's end. In this case just ignore the
 			 * current file extent item because we don't want to
 			 * overlap with previous ranges that may have been
-			 * emmitted already;
+			 * emitted already;
 			 *
 			 * 2) The file extent item starts behind the currently
 			 * cached extent but its end offset goes beyond the
 			 * end offset of the cached extent. We don't want to
 			 * overlap with a previous range that may have been
-			 * emmitted already, so we emit the currently cached
+			 * emitted already, so we emit the currently cached
 			 * extent and then partially store the current file
 			 * extent item's range in the cache, for the subrange
 			 * going the cached extent's end to the end of the
fs/btrfs/inode.c
@@ -5987,7 +5987,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	 * offset. This means that new entries created during readdir
 	 * are *guaranteed* to be seen in the future by that readdir.
 	 * This has broken buggy programs which operate on names as
-	 * they're returned by readdir. Until we re-use freed offsets
+	 * they're returned by readdir. Until we reuse freed offsets
 	 * we have this hack to stop new entries from being returned
 	 * under the assumption that they'll never reach this huge
 	 * offset.
fs/btrfs/qgroup.c
@@ -469,7 +469,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 		/*
 		 * If a qgroup exists for a subvolume ID, it is possible
 		 * that subvolume has been deleted, in which case
-		 * re-using that ID would lead to incorrect accounting.
+		 * reusing that ID would lead to incorrect accounting.
 		 *
 		 * Ensure that we skip any such subvol ids.
 		 *
fs/btrfs/scrub.c
@@ -1954,7 +1954,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
 	ASSERT(sctx->raid56_data_stripes);
 
 	/*
-	 * For data stripe search, we cannot re-use the same extent/csum paths,
+	 * For data stripe search, we cannot reuse the same extent/csum paths,
 	 * as the data stripe bytenr may be smaller than previous extent. Thus
 	 * we have to use our own extent/csum paths.
 	 */
fs/btrfs/space-info.c
@@ -1279,7 +1279,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
  * If we are freeing inodes, we want to make sure all delayed iputs have
  * completed, because they could have been on an inode with i_nlink == 0, and
  * thus have been truncated and freed up space. But again this space is not
- * immediately re-usable, it comes in the form of a delayed ref, which must be
+ * immediately reusable, it comes in the form of a delayed ref, which must be
  * run and then the transaction must be committed.
  *
  * COMMIT_TRANS
fs/btrfs/transaction.h
@@ -33,7 +33,7 @@ struct btrfs_path;
  */
 #define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)
 
-/* Radix-tree tag for roots that are part of the trasaction. */
+/* Radix-tree tag for roots that are part of the transaction. */
 #define BTRFS_ROOT_TRANS_TAG 0
 
 enum btrfs_trans_state {
fs/btrfs/volumes.c
@@ -5431,7 +5431,7 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
 
-	/* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
+	/* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */
 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
 					     ctl->stripe_size) + ctl->nparity,
fs/btrfs/zoned.c
@@ -1973,7 +1973,7 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 	if (block_group->meta_write_pointer > eb->start)
 		return -EBUSY;
 
-	/* If for_sync, this hole will be filled with trasnsaction commit. */
+	/* If for_sync, this hole will be filled with transaction commit. */
 	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
 		return -EAGAIN;
 	return -EBUSY;