mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  btrfs: fix inconsonant inode information
  Btrfs: make sure to update total_bitmaps when freeing cache V3
  Btrfs: fix type mismatch in find_free_extent()
  Btrfs: make sure to record the transid in new inodes
commit af4087e0e6
@@ -82,6 +82,39 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
         return root->fs_info->delayed_root;
 }
 
+static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
+{
+        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+        struct btrfs_root *root = btrfs_inode->root;
+        u64 ino = btrfs_ino(inode);
+        struct btrfs_delayed_node *node;
+
+        node = ACCESS_ONCE(btrfs_inode->delayed_node);
+        if (node) {
+                atomic_inc(&node->refs);
+                return node;
+        }
+
+        spin_lock(&root->inode_lock);
+        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+        if (node) {
+                if (btrfs_inode->delayed_node) {
+                        atomic_inc(&node->refs);        /* can be accessed */
+                        BUG_ON(btrfs_inode->delayed_node != node);
+                        spin_unlock(&root->inode_lock);
+                        return node;
+                }
+                btrfs_inode->delayed_node = node;
+                atomic_inc(&node->refs);        /* can be accessed */
+                atomic_inc(&node->refs);        /* cached in the inode */
+                spin_unlock(&root->inode_lock);
+                return node;
+        }
+        spin_unlock(&root->inode_lock);
+
+        return NULL;
+}
+
 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                                                         struct inode *inode)
 {
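The new btrfs_get_delayed_node() above (presumably fs/btrfs/delayed-inode.c) follows a common refcounted-lookup shape: a lockless fast path that reuses the pointer already cached in the inode, and a slow path that looks the node up in the root's radix tree under inode_lock, taking one reference for the caller and, if needed, one more for the cached pointer. Below is a minimal userspace sketch of that shape; get_node(), put_node(), table_lock and struct owner are invented names standing in for the kernel primitives, not the btrfs code itself. Build with: cc -std=c11 -pthread sketch.c

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64

struct node {
        atomic_int refs;
        unsigned long id;
};

static struct node *table[TABLE_SIZE];          /* stand-in for the radix tree */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct owner {
        struct node *cached;                    /* like btrfs_inode->delayed_node */
        unsigned long id;
};

static struct node *get_node(struct owner *o)
{
        struct node *n = o->cached;             /* lockless fast path */

        if (n) {
                atomic_fetch_add(&n->refs, 1);
                return n;
        }

        pthread_mutex_lock(&table_lock);
        n = table[o->id % TABLE_SIZE];          /* locked slow path */
        if (n) {
                atomic_fetch_add(&n->refs, 1);          /* reference for the caller */
                if (!o->cached) {
                        o->cached = n;
                        atomic_fetch_add(&n->refs, 1);  /* reference held by the cache */
                }
        }
        pthread_mutex_unlock(&table_lock);
        return n;
}

static void put_node(struct node *n)
{
        if (n && atomic_fetch_sub(&n->refs, 1) == 1)
                free(n);
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));
        struct owner o = { .cached = NULL, .id = 7 };
        struct node *got;

        atomic_init(&n->refs, 1);               /* reference held by the table */
        n->id = o.id;
        table[o.id % TABLE_SIZE] = n;

        got = get_node(&o);                     /* slow path: refs becomes 3 */
        printf("refs after lookup: %d\n", atomic_load(&got->refs));
        put_node(got);                          /* caller drops its reference */
        return 0;
}

The caller owns exactly one reference whenever get_node() returns non-NULL and must drop it again, which is the contract the later hunks make the btrfs callers honor.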
@@ -92,26 +125,9 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
         int ret;
 
 again:
-        node = ACCESS_ONCE(btrfs_inode->delayed_node);
-        if (node) {
-                atomic_inc(&node->refs);        /* can be accessed */
+        node = btrfs_get_delayed_node(inode);
+        if (node)
                 return node;
-        }
-
-        spin_lock(&root->inode_lock);
-        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
-        if (node) {
-                if (btrfs_inode->delayed_node) {
-                        spin_unlock(&root->inode_lock);
-                        goto again;
-                }
-                btrfs_inode->delayed_node = node;
-                atomic_inc(&node->refs);        /* can be accessed */
-                atomic_inc(&node->refs);        /* cached in the inode */
-                spin_unlock(&root->inode_lock);
-                return node;
-        }
-        spin_unlock(&root->inode_lock);
 
         node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
         if (!node)
@@ -548,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
         return next;
 }
 
-static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
-                                                        struct inode *inode)
-{
-        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
-        struct btrfs_delayed_node *delayed_node;
-
-        delayed_node = btrfs_inode->delayed_node;
-        if (delayed_node)
-                atomic_inc(&delayed_node->refs);
-
-        return delayed_node;
-}
-
 static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                    u64 root_id)
 {
@@ -1404,8 +1407,7 @@ end:
 
 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
 {
-        struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
-        int ret = 0;
+        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 
         if (!delayed_node)
                 return -ENOENT;
@@ -1415,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
          * a new directory index is added into the delayed node and index_cnt
          * is updated now. So we needn't lock the delayed node.
          */
-        if (!delayed_node->index_cnt)
+        if (!delayed_node->index_cnt) {
+                btrfs_release_delayed_node(delayed_node);
                 return -EINVAL;
+        }
 
         BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
-        return ret;
+        btrfs_release_delayed_node(delayed_node);
+        return 0;
 }
 
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
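The hunk above makes btrfs_inode_delayed_dir_index_count() release the delayed node on its error return as well as on success, now that it takes a reference through btrfs_get_delayed_node(). A minimal, hypothetical stand-alone sketch of that "drop the reference on every exit path" convention; node_put() and read_index_count() are invented names, not kernel functions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int refs; unsigned long index_cnt; };

static void node_put(struct node *n)
{
        if (n && --n->refs == 0)
                free(n);
}

/* Every return path drops the reference the caller holds, mirroring the
 * btrfs_release_delayed_node() calls added in the hunk above. */
static int read_index_count(struct node *n, unsigned long *out)
{
        if (!n)
                return -ENOENT;                 /* nothing looked up, nothing to drop */
        if (!n->index_cnt) {
                node_put(n);                    /* error path still releases the node */
                return -EINVAL;
        }
        *out = n->index_cnt;
        node_put(n);                            /* success path releases it as well */
        return 0;
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));
        unsigned long cnt = 0;
        int ret;

        n->refs = 1;
        n->index_cnt = 42;
        ret = read_index_count(n, &cnt);        /* consumes the reference */
        printf("ret=%d cnt=%lu\n", ret, cnt);
        return 0;
}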
@@ -1613,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                      inode->i_ctime.tv_nsec);
 }
 
+int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+{
+        struct btrfs_delayed_node *delayed_node;
+        struct btrfs_inode_item *inode_item;
+        struct btrfs_timespec *tspec;
+
+        delayed_node = btrfs_get_delayed_node(inode);
+        if (!delayed_node)
+                return -ENOENT;
+
+        mutex_lock(&delayed_node->mutex);
+        if (!delayed_node->inode_dirty) {
+                mutex_unlock(&delayed_node->mutex);
+                btrfs_release_delayed_node(delayed_node);
+                return -ENOENT;
+        }
+
+        inode_item = &delayed_node->inode_item;
+
+        inode->i_uid = btrfs_stack_inode_uid(inode_item);
+        inode->i_gid = btrfs_stack_inode_gid(inode_item);
+        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+        inode->i_mode = btrfs_stack_inode_mode(inode_item);
+        inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
+        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
+        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+        BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+        inode->i_rdev = 0;
+        *rdev = btrfs_stack_inode_rdev(inode_item);
+        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+
+        tspec = btrfs_inode_atime(inode_item);
+        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
+        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+        tspec = btrfs_inode_mtime(inode_item);
+        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
+        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+        tspec = btrfs_inode_ctime(inode_item);
+        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
+        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+        inode->i_generation = BTRFS_I(inode)->generation;
+        BTRFS_I(inode)->index_cnt = (u64)-1;
+
+        mutex_unlock(&delayed_node->mutex);
+        btrfs_release_delayed_node(delayed_node);
+        return 0;
+}
+
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode)
 {
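btrfs_fill_inode(), added above, copies the inode fields out of the delayed node's in-memory inode_item while holding the node's mutex, and only when inode_dirty is set; otherwise it returns -ENOENT so the caller falls back to the on-disk item. A rough userspace sketch of just that lock-and-dirty-check shape (the reference get/release is omitted); fill_from_copy() and struct delayed_copy are made-up names, not the kernel API. Build with cc -pthread:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct delayed_copy {
        pthread_mutex_t lock;
        bool dirty;                     /* like delayed_node->inode_dirty */
        unsigned long size;
        unsigned int mode;
};

static int fill_from_copy(struct delayed_copy *c,
                          unsigned long *size, unsigned int *mode)
{
        if (!c)
                return -ENOENT;         /* no in-memory state at all */

        pthread_mutex_lock(&c->lock);
        if (!c->dirty) {                /* nothing newer than what is on disk */
                pthread_mutex_unlock(&c->lock);
                return -ENOENT;
        }
        *size = c->size;                /* copy everything out under the lock */
        *mode = c->mode;
        pthread_mutex_unlock(&c->lock);
        return 0;
}

int main(void)
{
        struct delayed_copy c = {
                .lock = PTHREAD_MUTEX_INITIALIZER, .dirty = true,
                .size = 123, .mode = 0100644,
        };
        unsigned long size;
        unsigned int mode;

        if (fill_from_copy(&c, &size, &mode) == 0)
                printf("size=%lu mode=%o\n", size, mode);
        return 0;
}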
@@ -119,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode);
+int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
@@ -4842,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                      u64 num_bytes, u64 empty_size,
                                      u64 search_start, u64 search_end,
                                      u64 hint_byte, struct btrfs_key *ins,
-                                     int data)
+                                     u64 data)
 {
         int ret = 0;
         struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4869,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 
         space_info = __find_space_info(root->fs_info, data);
         if (!space_info) {
-                printk(KERN_ERR "No space info for %d\n", data);
+                printk(KERN_ERR "No space info for %llu\n", data);
                 return -ENOSPC;
         }
 
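The two hunks above (presumably fs/btrfs/extent-tree.c) change find_free_extent()'s data parameter from int to u64 and adjust the printk format to match, since the allocation flags are passed around as a 64-bit value. A tiny, self-contained demonstration of the bug class; the function names are invented for the example:

#include <inttypes.h>
#include <stdio.h>

static void lookup_space_info_narrow(int flags)
{
        printf("narrowed flags: %d\n", flags);          /* high bits already lost */
}

static void lookup_space_info(uint64_t flags)
{
        printf("full flags: %" PRIu64 "\n", flags);     /* %llu-style, no loss */
}

int main(void)
{
        uint64_t flags = UINT64_C(1) << 35;             /* only a high bit set */

        /* Narrowing to int keeps only the low 32 bits on typical systems,
         * so the value arrives as 0. */
        lookup_space_info_narrow((int)flags);
        lookup_space_info(flags);                       /* prints 34359738368 */
        return 0;
}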
@@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
 
         while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                 info = rb_entry(node, struct btrfs_free_space, offset_index);
-                unlink_free_space(ctl, info);
-                kfree(info->bitmap);
-                kmem_cache_free(btrfs_free_space_cachep, info);
+                if (!info->bitmap) {
+                        unlink_free_space(ctl, info);
+                        kmem_cache_free(btrfs_free_space_cachep, info);
+                } else {
+                        free_bitmap(ctl, info);
+                }
                 if (need_resched()) {
                         spin_unlock(&ctl->tree_lock);
                         cond_resched();
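The hunk above (presumably fs/btrfs/free-space-cache.c) stops kfree()ing bitmap entries directly and routes them through free_bitmap(), which also updates ctl->total_bitmaps, so the cache's accounting stays in step with what was actually freed. A hypothetical userspace sketch of why the accounting-aware helper matters; the names are invented and this is not the btrfs implementation:

#include <stdio.h>
#include <stdlib.h>

struct space_ctl {
        int total_bitmaps;              /* like ctl->total_bitmaps */
};

struct free_space {
        unsigned char *bitmap;          /* NULL for plain extent entries */
};

/* Accounting-aware path, analogous to free_bitmap(ctl, info). */
static void free_bitmap_entry(struct space_ctl *ctl, struct free_space *info)
{
        free(info->bitmap);
        free(info);
        ctl->total_bitmaps--;           /* counter stays in sync with reality */
}

/* Buggy path: kfree()-style direct free, counter never decremented. */
static void free_entry_directly(struct free_space *info)
{
        free(info->bitmap);
        free(info);
}

int main(void)
{
        struct space_ctl ctl = { .total_bitmaps = 2 };
        struct free_space *a = calloc(1, sizeof(*a));
        struct free_space *b = calloc(1, sizeof(*b));

        a->bitmap = calloc(1, 32);
        b->bitmap = calloc(1, 32);

        free_bitmap_entry(&ctl, a);
        free_entry_directly(b);         /* oops: ctl still counts b's bitmap */

        printf("bitmaps left according to ctl: %d (actual: 0)\n", ctl.total_bitmaps);
        return 0;
}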
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
         int maybe_acls;
         u32 rdev;
         int ret;
+        bool filled = false;
+
+        ret = btrfs_fill_inode(inode, &rdev);
+        if (!ret)
+                filled = true;
 
         path = btrfs_alloc_path();
         BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
                 goto make_bad;
 
         leaf = path->nodes[0];
+
+        if (filled)
+                goto cache_acl;
+
         inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_inode_item);
         if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
         BTRFS_I(inode)->index_cnt = (u64)-1;
         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
+cache_acl:
         /*
          * try to precache a NULL acl entry for files that don't have
          * any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
         }
 
         btrfs_free_path(path);
-        inode_item = NULL;
 
         switch (inode->i_mode & S_IFMT) {
         case S_IFREG:
|
||||
inode_tree_add(inode);
|
||||
|
||||
trace_btrfs_inode_new(inode);
|
||||
btrfs_set_inode_last_trans(trans, inode);
|
||||
|
||||
return inode;
|
||||
fail:
|
||||