mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-07 14:32:23 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix panic when trying to destroy a newly allocated
  Btrfs: allow more metadata chunk preallocation
  Btrfs: fallback on uncompressed io if compressed io fails
  Btrfs: find ideal block group for caching
  Btrfs: avoid null deref in unpin_extent_cache()
  Btrfs: skip btrfs_release_path in btrfs_update_root and btrfs_del_root
  Btrfs: fix some metadata enospc issues
  Btrfs: fix how we set max_size for free space clusters
  Btrfs: cleanup transaction starting and fix journal_info usage
  Btrfs: fix data allocation hint start
commit aa021baa32
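
Among the changes merged here, the first hunk below raises the metadata preallocation cap from 5gb to 10gb, keeping the cap at the smaller of that limit and 5% of the filesystem's total bytes. The following is a minimal standalone C sketch of that cap computation only; the helper name and the sample volume size are made up for the example and are not kernel code.

/*
 * Illustrative sketch: the metadata cap used in the first hunk below is
 * min(10 GiB, 5% of the filesystem's total bytes). Helper name and the
 * sample volume size are hypothetical, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t metadata_cap(uint64_t total_bytes)
{
	uint64_t ten_gib = 10ULL * 1024 * 1024 * 1024;
	uint64_t five_percent = total_bytes * 5 / 100;

	return ten_gib < five_percent ? ten_gib : five_percent;
}

int main(void)
{
	/* 1 TiB volume: 5% is ~51.2 GiB, so the 10 GiB limit wins */
	printf("cap = %llu bytes\n",
	       (unsigned long long)metadata_cap(1ULL << 40));
	return 0;
}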
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 
 	free_space = btrfs_super_total_bytes(disk_super);
 	/*
-	 * we allow the metadata to grow to a max of either 5gb or 5% of the
+	 * we allow the metadata to grow to a max of either 10gb or 5% of the
 	 * space in the volume.
 	 */
-	min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
 			   div64_u64(free_space * 5, 100));
 	if (info->total_bytes >= min_metadata) {
 		spin_unlock(&info->lock);
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-	LOOP_CACHED_ONLY = 0,
+	LOOP_FIND_IDEAL = 0,
 	LOOP_CACHING_NOWAIT = 1,
 	LOOP_CACHING_WAIT = 2,
 	LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group = NULL;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
+	int done_chunk_alloc = 0;
 	struct btrfs_space_info *space_info;
 	int last_ptr_loop = 0;
 	int loop = 0;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
 	bool failed_alloc = false;
+	u64 ideal_cache_percent = 0;
+	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
+ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
+		 *
+		 * However if we are re-searching with an ideal block group
+		 * picked out then we don't care that the block group is cached.
 		 */
 		if (block_group && block_group_bits(block_group, data) &&
-		    block_group_cache_done(block_group)) {
+		    (block_group->cached != BTRFS_CACHE_NO ||
+		     search_start == ideal_cache_offset)) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 				 */
 				btrfs_put_block_group(block_group);
 				up_read(&space_info->groups_sem);
-			} else
+			} else {
 				goto have_block_group;
+			}
 		} else if (block_group) {
 			btrfs_put_block_group(block_group);
 		}
 	}
-
 search:
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 
 have_block_group:
 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+			u64 free_percent;
+
+			free_percent = btrfs_block_group_used(&block_group->item);
+			free_percent *= 100;
+			free_percent = div64_u64(free_percent,
+						 block_group->key.offset);
+			free_percent = 100 - free_percent;
+			if (free_percent > ideal_cache_percent &&
+			    likely(!block_group->ro)) {
+				ideal_cache_offset = block_group->key.objectid;
+				ideal_cache_percent = free_percent;
+			}
+
 			/*
-			 * we want to start caching kthreads, but not too many
-			 * right off the bat so we don't overwhelm the system,
-			 * so only start them if there are less than 2 and we're
-			 * in the initial allocation phase.
+			 * We only want to start kthread caching if we are at
+			 * the point where we will wait for caching to make
+			 * progress, or if our ideal search is over and we've
+			 * found somebody to start caching.
 			 */
 			if (loop > LOOP_CACHING_NOWAIT ||
-			    atomic_read(&space_info->caching_threads) < 2) {
+			    (loop > LOOP_FIND_IDEAL &&
+			     atomic_read(&space_info->caching_threads) < 2)) {
 				ret = cache_block_group(block_group);
 				BUG_ON(ret);
 			}
+			found_uncached_bg = true;
+
+			/*
+			 * If loop is set for cached only, try the next block
+			 * group.
+			 */
+			if (loop == LOOP_FIND_IDEAL)
+				goto loop;
 		}
 
 		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached)) {
+		if (unlikely(!cached))
 			found_uncached_bg = true;
 
-			/* if we only want cached bgs, loop */
-			if (loop == LOOP_CACHED_ONLY)
-				goto loop;
-		}
-
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -4410,9 +4435,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	}
 	up_read(&space_info->groups_sem);
 
-	/* LOOP_CACHED_ONLY, only search fully cached block groups
-	 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
-	 * dont wait foR them to finish caching
+	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
+	 * for them to make caching progress. Also
+	 * determine the best possible bg to cache
+	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
+	 * caching kthreads as we move along
 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4421,12 +4448,47 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
 	    (found_uncached_bg || empty_size || empty_cluster ||
 	     allowed_chunk_alloc)) {
-		if (found_uncached_bg) {
+		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 			found_uncached_bg = false;
-			if (loop < LOOP_CACHING_WAIT) {
-				loop++;
+			loop++;
+			if (!ideal_cache_percent &&
+			    atomic_read(&space_info->caching_threads))
 				goto search;
-			}
+
+			/*
+			 * 1 of the following 2 things have happened so far
+			 *
+			 * 1) We found an ideal block group for caching that
+			 * is mostly full and will cache quickly, so we might
+			 * as well wait for it.
+			 *
+			 * 2) We searched for cached only and we didn't find
+			 * anything, and we didn't start any caching kthreads
+			 * either, so chances are we will loop through and
+			 * start a couple caching kthreads, and then come back
+			 * around and just wait for them. This will be slower
+			 * because we will have 2 caching kthreads reading at
+			 * the same time when we could have just started one
+			 * and waited for it to get far enough to give us an
+			 * allocation, so go ahead and go to the wait caching
+			 * loop.
+			 */
+			loop = LOOP_CACHING_WAIT;
+			search_start = ideal_cache_offset;
+			ideal_cache_percent = 0;
+			goto ideal_cache;
+		} else if (loop == LOOP_FIND_IDEAL) {
+			/*
+			 * Didn't find a uncached bg, wait on anything we find
+			 * next.
+			 */
+			loop = LOOP_CACHING_WAIT;
+			goto search;
+		}
+
+		if (loop < LOOP_CACHING_WAIT) {
+			loop++;
+			goto search;
 		}
 
 		if (loop == LOOP_ALLOC_CHUNK) {
@@ -4438,7 +4500,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 			ret = do_chunk_alloc(trans, root, num_bytes +
 					     2 * 1024 * 1024, data, 1);
 			allowed_chunk_alloc = 0;
-		} else {
+			done_chunk_alloc = 1;
+		} else if (!done_chunk_alloc) {
 			space_info->force_alloc = 1;
 		}
 
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
 	write_lock(&tree->lock);
 	em = lookup_extent_mapping(tree, start, len);
 
-	WARN_ON(em->start != start || !em);
+	WARN_ON(!em || em->start != start);
 
 	if (!em)
 		goto out;
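
The unpin_extent_cache() change above is purely about evaluation order: with a short-circuiting ||, the NULL test has to come before the dereference, otherwise the WARN_ON itself can oops on a missing mapping. A small self-contained C sketch of the same pattern follows; the struct and names are simplified stand-ins, not the kernel's extent_map.

/*
 * Sketch of the ordering fix above: putting the NULL test first lets ||
 * short-circuit and skip the dereference. Types and names are stand-ins.
 */
#include <stdio.h>

struct mapping { unsigned long long start; };

static void check(const struct mapping *em, unsigned long long start)
{
	/* buggy order would be: if (em->start != start || !em) ... */
	if (!em || em->start != start)
		fprintf(stderr, "warning: missing or mismatched mapping\n");
}

int main(void)
{
	check(NULL, 4096);	/* safe: !em is evaluated before em->start */
	return 0;
}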
@@ -1296,7 +1296,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		window_start = entry->offset;
 		window_free = entry->bytes;
 		last = entry;
-		max_extent = 0;
+		max_extent = entry->bytes;
 	} else {
 		last = next;
 		window_free += next->bytes;
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree;
-	int ret;
+	int ret = 0;
 
 	if (list_empty(&async_cow->extents))
 		return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 
 		io_tree = &BTRFS_I(inode)->io_tree;
 
+retry:
 		/* did the compression code fall back to uncompressed IO? */
 		if (!async_extent->pages) {
 			int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
 				 async_extent->ram_size - 1, GFP_NOFS);
 
 		/* allocate blocks */
-		cow_file_range(inode, async_cow->locked_page,
+		ret = cow_file_range(inode, async_cow->locked_page,
 			       async_extent->start,
 			       async_extent->start +
 			       async_extent->ram_size - 1,
 			       &page_started, &nr_written, 0);
 
 		/*
 		 * if page_started, cow_file_range inserted an
|
|||||||
* and IO for us. Otherwise, we need to submit
|
* and IO for us. Otherwise, we need to submit
|
||||||
* all those pages down to the drive.
|
* all those pages down to the drive.
|
||||||
*/
|
*/
|
||||||
if (!page_started)
|
if (!page_started && !ret)
|
||||||
extent_write_locked_range(io_tree,
|
extent_write_locked_range(io_tree,
|
||||||
inode, async_extent->start,
|
inode, async_extent->start,
|
||||||
async_extent->start +
|
async_extent->start +
|
||||||
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
 					   async_extent->compressed_size,
 					   0, alloc_hint,
 					   (u64)-1, &ins, 1);
-		BUG_ON(ret);
+		if (ret) {
+			int i;
+			for (i = 0; i < async_extent->nr_pages; i++) {
+				WARN_ON(async_extent->pages[i]->mapping);
+				page_cache_release(async_extent->pages[i]);
+			}
+			kfree(async_extent->pages);
+			async_extent->nr_pages = 0;
+			async_extent->pages = NULL;
+			unlock_extent(io_tree, async_extent->start,
+				      async_extent->start +
+				      async_extent->ram_size - 1, GFP_NOFS);
+			goto retry;
+		}
+
 		em = alloc_extent_map(GFP_NOFS);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
@@ -743,8 +758,22 @@ static noinline int cow_file_range(struct inode *inode,
 	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
 				   start, num_bytes);
 	if (em) {
-		alloc_hint = em->block_start;
-		free_extent_map(em);
+		/*
+		 * if block start isn't an actual block number then find the
+		 * first block in this inode and use that as a hint. If that
+		 * block is also bogus then just don't worry about it.
+		 */
+		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+			free_extent_map(em);
+			em = search_extent_mapping(em_tree, 0, 0);
+			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+				alloc_hint = em->block_start;
+			if (em)
+				free_extent_map(em);
+		} else {
+			alloc_hint = em->block_start;
+			free_extent_map(em);
+		}
 	}
 	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 
 	root = BTRFS_I(dir)->root;
 
+	/*
+	 * 5 items for unlink inode
+	 * 1 for orphan
+	 */
+	ret = btrfs_reserve_metadata_space(root, 6);
+	if (ret)
+		return ret;
+
 	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_unreserve_metadata_space(root, 6);
+		return PTR_ERR(trans);
+	}
 
 	btrfs_set_trans_block_group(trans, dir);
 
@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 	nr = trans->blocks_used;
 
 	btrfs_end_transaction_throttle(trans, root);
+	btrfs_unreserve_metadata_space(root, 6);
 	btrfs_btree_balance_dirty(root, nr);
 	return ret;
 }
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		return -ENOTEMPTY;
 
+	ret = btrfs_reserve_metadata_space(root, 5);
+	if (ret)
+		return ret;
+
 	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_unreserve_metadata_space(root, 5);
+		return PTR_ERR(trans);
+	}
+
 	btrfs_set_trans_block_group(trans, dir);
 
 	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 out:
 	nr = trans->blocks_used;
 	ret = btrfs_end_transaction_throttle(trans, root);
+	btrfs_unreserve_metadata_space(root, 5);
 	btrfs_btree_balance_dirty(root, nr);
 
 	if (ret && !err)
@@ -5128,6 +5180,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->logged_trans = 0;
 	ei->outstanding_extents = 0;
 	ei->reserved_extents = 0;
+	ei->root = NULL;
 	spin_lock_init(&ei->accounting_lock);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
 	INIT_LIST_HEAD(&ei->i_orphan);
@@ -5143,6 +5196,14 @@ void btrfs_destroy_inode(struct inode *inode)
 	WARN_ON(!list_empty(&inode->i_dentry));
 	WARN_ON(inode->i_data.nrpages);
 
+	/*
+	 * This can happen where we create an inode, but somebody else also
+	 * created the same inode and we need to destroy the one we already
+	 * created.
+	 */
+	if (!root)
+		goto free;
+
 	/*
 	 * Make sure we're properly removed from the ordered operation
 	 * lists.
@@ -5178,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
 	}
 	inode_tree_del(inode);
 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+free:
 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
@@ -5283,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		return -ENOTEMPTY;
 
 	/*
-	 * 2 items for dir items
-	 * 1 item for orphan entry
-	 * 1 item for ref
+	 * We want to reserve the absolute worst case amount of items. So if
+	 * both inodes are subvols and we need to unlink them then that would
+	 * require 4 item modifications, but if they are both normal inodes it
+	 * would require 5 item modifications, so we'll assume their normal
+	 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
+	 * should cover the worst case number of items we'll modify.
 	 */
-	ret = btrfs_reserve_metadata_space(root, 4);
+	ret = btrfs_reserve_metadata_space(root, 11);
 	if (ret)
 		return ret;
 
@@ -5403,7 +5468,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&root->fs_info->subvol_sem);
 
-	btrfs_unreserve_metadata_space(root, 4);
+	btrfs_unreserve_metadata_space(root, 11);
 	return ret;
 }
 
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 	write_extent_buffer(l, item, ptr, sizeof(*item));
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 out:
-	btrfs_release_path(root, path);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	BUG_ON(refs != 0);
 	ret = btrfs_del_item(trans, root, path);
 out:
-	btrfs_release_path(root, path);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
 	}
 }
 
+enum btrfs_trans_type {
+	TRANS_START,
+	TRANS_JOIN,
+	TRANS_USERSPACE,
+};
+
 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-					     int num_blocks, int wait)
+					     int num_blocks, int type)
 {
 	struct btrfs_trans_handle *h =
 		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 
 	mutex_lock(&root->fs_info->trans_mutex);
 	if (!root->fs_info->log_root_recovering &&
-	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
+	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
+	     type == TRANS_USERSPACE))
 		wait_current_trans(root);
 	ret = join_transaction(root);
 	BUG_ON(ret);
@@ -186,7 +193,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 	h->alloc_exclude_start = 0;
 	h->delayed_ref_updates = 0;
 
-	if (!current->journal_info)
+	if (!current->journal_info && type != TRANS_USERSPACE)
 		current->journal_info = h;
 
 	root->fs_info->running_transaction->use_count++;
@@ -198,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 						   int num_blocks)
 {
-	return start_transaction(root, num_blocks, 1);
+	return start_transaction(root, num_blocks, TRANS_START);
 }
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
 						  int num_blocks)
 {
-	return start_transaction(root, num_blocks, 0);
+	return start_transaction(root, num_blocks, TRANS_JOIN);
 }
 
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
 							  int num_blocks)
 {
-	return start_transaction(r, num_blocks, 2);
+	return start_transaction(r, num_blocks, TRANS_USERSPACE);
 }
 
 /* wait for a transaction commit to be fully complete */