Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2025-01-01 10:42:11 +00:00.
Ext4 bug fixes and cleanups for 6.9-rc1, plus some additional kunit tests.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAmXydHkACgkQ8vlZVpUN
gaPFcQf/e1DcEw7dITXoOJ16Sz3pI3ykFEae3aIp1C0DoBL6ncnx4NrKJlbKVmfG
CvYwwaPIILps0W5gwRll0wG8G9wrx+QY+xx5elsFKlfLsiRmkvXEIFPELYvtblcG
u6fXumpArtH2dbjsmxw+gxEuborl3aeOIWW62dVvarEpfdvFlEwMAfBYlJ/E4HKM
z74CmR09sr51XuQZTKaUNioyS6qNR/HIBoelJ50Xt6qLZrpfyIxtU/wHbN1GAM5+
pBXCYxlBaiSJHb8p9R99DT5JqVD5zwrqWscbajEhOJo4QQQacJGGvIOHz6b6FMRV
+dPnTBh79t8DAktqT6LAf83bmiRCWQ==
=4/t9
-----END PGP SIGNATURE-----

Merge tag 'ext4_for_linus-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Ext4 bug fixes and cleanups, plus some additional kunit tests"

* tag 'ext4_for_linus-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (23 commits)
  ext4: initialize sbi->s_freeclusters_counter and sbi->s_dirtyclusters_counter before use in kunit test
  ext4: hold group lock in ext4 kunit test
  ext4: alloc test super block from sget
  ext4: kunit: use dynamic inode allocation
  ext4: enable meta_bg only when new desc blocks are needed
  ext4: remove unused parameter biop in ext4_issue_discard()
  ext4: remove SLAB_MEM_SPREAD flag usage
  ext4: verify s_clusters_per_group even without bigalloc
  ext4: fix corruption during on-line resize
  ext4: don't report EOPNOTSUPP errors from discard
  ext4: drop duplicate ea_inode handling in ext4_xattr_block_set()
  ext4: fold quota accounting into ext4_xattr_inode_lookup_create()
  ext4: correct best extent lstart adjustment logic
  ext4: forbid commit inconsistent quota data when errors=remount-ro
  ext4: add a hint for block bitmap corrupt state in mb_groups
  ext4: fix the comment of ext4_map_blocks()/ext4_ext_map_blocks()
  ext4: improve error msg for ext4_mb_seq_groups_show
  ext4: remove unused buddy_loaded in ext4_mb_seq_groups_show
  ext4: Add unit test for ext4_mb_mark_diskspace_used
  ext4: Add unit test for mb_free_blocks
  ...

This commit is contained in: commit 68bf6bfdcf
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4111,10 +4111,10 @@ static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
  *
  * Need to be called with
  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
- * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ * (ie, flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  *
  * return > 0, number of blocks already mapped/allocated
- *          if create == 0 and these are pre-allocated blocks
+ *          if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are pre-allocated blocks
  *          	buffer head is unmapped
  *          otherwise blocks are mapped
  *
@@ -4218,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,

 	/*
 	 * requested block isn't allocated yet;
-	 * we couldn't try to create block if create flag is zero
+	 * we couldn't try to create block if flags doesn't contain EXT4_GET_BLOCKS_CREATE
 	 */
 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 		ext4_lblk_t len;
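Note: both hunks above replace wording about a "create" argument with the flags bitmask the functions actually take. A minimal sketch of the check the new comments refer to; EXT4_GET_BLOCKS_CREATE is the real ext4 flag, while the helper name is hypothetical:

/*
 * Sketch only: callers request allocation by setting
 * EXT4_GET_BLOCKS_CREATE in flags; with the bit clear the call is a
 * pure lookup, and pre-allocated unwritten blocks stay unmapped.
 */
static bool mapping_may_allocate(int flags)
{
	return (flags & EXT4_GET_BLOCKS_CREATE) != 0;
}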
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -465,9 +465,10 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
  * based files
  *
- * On success, it returns the number of blocks being mapped or allocated. if
- * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
- * is marked as unwritten. If the create == 1, it will mark @map as mapped.
+ * On success, it returns the number of blocks being mapped or allocated.
+ * If flags doesn't contain EXT4_GET_BLOCKS_CREATE the blocks are
+ * pre-allocated and unwritten, the resulting @map is marked as unwritten.
+ * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
  *
  * It returns 0 if plain look up failed (blocks have not been allocated), in
  * that case, @map is returned as unmapped but we still do fill map->m_len to
@@ -589,8 +590,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 		 * Returns if the blocks have already allocated
 		 *
 		 * Note that if blocks have been preallocated
-		 * ext4_ext_get_block() returns the create = 0
-		 * with buffer head unmapped.
+		 * ext4_ext_map_blocks() returns with buffer head unmapped
 		 */
 		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 			/*
--- a/fs/ext4/mballoc-test.c
+++ b/fs/ext4/mballoc-test.c
@@ -5,6 +5,7 @@

 #include <kunit/test.h>
 #include <kunit/static_stub.h>
+#include <linux/random.h>

 #include "ext4.h"

@@ -20,41 +21,135 @@ struct mbt_ctx {
 };

 struct mbt_ext4_super_block {
-	struct super_block sb;
+	struct ext4_super_block es;
+	struct ext4_sb_info sbi;
 	struct mbt_ctx mbt_ctx;
 };

-#define MBT_CTX(_sb) (&(container_of((_sb), struct mbt_ext4_super_block, sb)->mbt_ctx))
+#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
+#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
 #define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])

+static const struct super_operations mbt_sops = {
+};
+
+static void mbt_kill_sb(struct super_block *sb)
+{
+	generic_shutdown_super(sb);
+}
+
+static struct file_system_type mbt_fs_type = {
+	.name			= "mballoc test",
+	.kill_sb		= mbt_kill_sb,
+};
+
+static int mbt_mb_init(struct super_block *sb)
+{
+	ext4_fsblk_t block;
+	int ret;
+
+	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
+	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
+	if (sb->s_bdev == NULL)
+		return -ENOMEM;
+
+	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
+	if (sb->s_bdev->bd_queue == NULL) {
+		kfree(sb->s_bdev);
+		return -ENOMEM;
+	}
+
+	/*
+	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
+	 * new_inode(sb);
+	 */
+	INIT_LIST_HEAD(&sb->s_inodes);
+	sb->s_op = &mbt_sops;
+
+	ret = ext4_mb_init(sb);
+	if (ret != 0)
+		goto err_out;
+
+	block = ext4_count_free_clusters(sb);
+	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
+				  GFP_KERNEL);
+	if (ret != 0)
+		goto err_mb_release;
+
+	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
+				  GFP_KERNEL);
+	if (ret != 0)
+		goto err_freeclusters;
+
+	return 0;
+
+err_freeclusters:
+	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
+err_mb_release:
+	ext4_mb_release(sb);
+err_out:
+	kfree(sb->s_bdev->bd_queue);
+	kfree(sb->s_bdev);
+	return ret;
+}
+
+static void mbt_mb_release(struct super_block *sb)
+{
+	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
+	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
+	ext4_mb_release(sb);
+	kfree(sb->s_bdev->bd_queue);
+	kfree(sb->s_bdev);
+}
+
+static int mbt_set(struct super_block *sb, void *data)
+{
+	return 0;
+}
+
 static struct super_block *mbt_ext4_alloc_super_block(void)
 {
-	struct ext4_super_block *es = kzalloc(sizeof(*es), GFP_KERNEL);
-	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	struct mbt_ext4_super_block *fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
+	struct mbt_ext4_super_block *fsb;
+	struct super_block *sb;
+	struct ext4_sb_info *sbi;

-	if (fsb == NULL || sbi == NULL || es == NULL)
+	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
+	if (fsb == NULL)
+		return NULL;
+
+	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
+	if (IS_ERR(sb))
 		goto out;

-	sbi->s_es = es;
-	fsb->sb.s_fs_info = sbi;
-	return &fsb->sb;
+	sbi = &fsb->sbi;
+
+	sbi->s_blockgroup_lock =
+		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+	if (!sbi->s_blockgroup_lock)
+		goto out_deactivate;
+
+	bgl_lock_init(sbi->s_blockgroup_lock);
+
+	sbi->s_es = &fsb->es;
+	sb->s_fs_info = sbi;
+
+	up_write(&sb->s_umount);
+	return sb;
+
+out_deactivate:
+	deactivate_locked_super(sb);
 out:
 	kfree(fsb);
-	kfree(sbi);
-	kfree(es);
 	return NULL;
 }

 static void mbt_ext4_free_super_block(struct super_block *sb)
 {
-	struct mbt_ext4_super_block *fsb =
-		container_of(sb, struct mbt_ext4_super_block, sb);
+	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);

-	kfree(sbi->s_es);
-	kfree(sbi);
+	kfree(sbi->s_blockgroup_lock);
+	deactivate_super(sb);
 	kfree(fsb);
 }
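Note: the rewritten helpers above switch the test super_block from a bare kzalloc() to sget(), so VFS state (the s_umount lock, anonymous dev, shutdown path) is initialized the same way as for a real filesystem. A condensed lifecycle sketch, reusing mbt_fs_type and mbt_set from the diff; this is illustrative, not the full test code:

/*
 * Sketch: sget() returns a new, locked super_block; the caller fills
 * in private state, releases s_umount, and later tears everything
 * down through deactivate_super() -> mbt_kill_sb().
 */
struct super_block *sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);

if (!IS_ERR(sb)) {
	up_write(&sb->s_umount);	/* sget() returns with s_umount held */
	/* ... run tests against sb ... */
	deactivate_super(sb);		/* drops the ref, invokes kill_sb */
}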
@@ -82,6 +177,9 @@ static void mbt_init_sb_layout(struct super_block *sb,
 	sbi->s_clusters_per_group = layout->blocks_per_group >>
 				    layout->cluster_bits;
 	sbi->s_desc_size = layout->desc_size;
+	sbi->s_desc_per_block_bits =
+		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
+	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

 	es->s_first_data_block = cpu_to_le32(0);
 	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
@@ -91,9 +189,13 @@ static void mbt_init_sb_layout(struct super_block *sb,
 static int mbt_grp_ctx_init(struct super_block *sb,
 			    struct mbt_grp_ctx *grp_ctx)
 {
+	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+
 	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
 	if (grp_ctx->bitmap_bh.b_data == NULL)
 		return -ENOMEM;
+	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
+	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

 	return 0;
 }
@@ -112,6 +214,13 @@ static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
 	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
 }

+static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
+{
+	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
+
+	return grp_ctx->bitmap_bh.b_data;
+}
+
 /* called after mbt_init_sb_layout */
 static int mbt_ctx_init(struct super_block *sb)
 {
@@ -133,6 +242,8 @@ static int mbt_ctx_init(struct super_block *sb)
 	 * block which will fail ext4_sb_block_valid check.
 	 */
 	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
+	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
+				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

 	return 0;
 out:
@@ -167,6 +278,13 @@ static int ext4_wait_block_bitmap_stub(struct super_block *sb,
 				       ext4_group_t block_group,
 				       struct buffer_head *bh)
 {
+	/*
+	 * real ext4_wait_block_bitmap will set these flags and
+	 * functions like ext4_mb_init_cache will verify the flags.
+	 */
+	set_buffer_uptodate(bh);
+	set_bitmap_uptodate(bh);
+	set_buffer_verified(bh);
 	return 0;
 }
@@ -232,6 +350,14 @@ static int mbt_kunit_init(struct kunit *test)
 	kunit_activate_static_stub(test,
 				   ext4_mb_mark_context,
 				   ext4_mb_mark_context_stub);
+
+	/* stub function will be called in mbt_mb_init->ext4_mb_init */
+	if (mbt_mb_init(sb) != 0) {
+		mbt_ctx_release(sb);
+		mbt_ext4_free_super_block(sb);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
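Note: mbt_mb_init() runs under the static stub activated just above. A short sketch of the KUnit static-stub pattern as used here; redirection only works because the stubbed function contains KUNIT_STATIC_STUB_REDIRECT(), and active stubs are torn down automatically when the test finishes:

/* Sketch of the stub pattern (function names taken from the diff above). */
kunit_activate_static_stub(test, ext4_mb_mark_context,
			   ext4_mb_mark_context_stub);
/* ... calls to ext4_mb_mark_context() now reach the stub ... */
kunit_deactivate_static_stub(test, ext4_mb_mark_context);	/* optional early stop */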
@@ -239,6 +365,7 @@ static void mbt_kunit_exit(struct kunit *test)
 {
 	struct super_block *sb = (struct super_block *)test->priv;

+	mbt_mb_release(sb);
 	mbt_ctx_release(sb);
 	mbt_ext4_free_super_block(sb);
 }
@@ -246,14 +373,19 @@ static void mbt_kunit_exit(struct kunit *test)
 static void test_new_blocks_simple(struct kunit *test)
 {
 	struct super_block *sb = (struct super_block *)test->priv;
-	struct inode inode = { .i_sb = sb, };
+	struct inode *inode;
 	struct ext4_allocation_request ar;
 	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
 	int err = 0;
 	ext4_fsblk_t found;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);

-	ar.inode = &inode;
+	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+	if (!inode)
+		return;
+
+	inode->i_sb = sb;
+	ar.inode = inode;

 	/* get block at goal */
 	ar.goal = ext4_group_first_block_no(sb, goal_group);
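Note: the hunk above moves the test's struct inode off the stack. A struct inode is large enough to be risky on a kernel stack, and kunit_kzalloc() ties the allocation's lifetime to the test, so no explicit kfree() is needed on any exit path. Minimal sketch of the pattern, mirroring the diff:

/* Sketch: test-managed allocation, freed automatically when the test ends. */
struct inode *inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);

if (!inode)
	return;		/* allocation failure aborts this test case */
inode->i_sb = sb;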
@@ -297,6 +429,436 @@ static void test_new_blocks_simple(struct kunit *test)
 			    "unexpectedly get block when no block is available");
 }

+#define TEST_RANGE_COUNT 8
+
+struct test_range {
+	ext4_grpblk_t start;
+	ext4_grpblk_t len;
+};
+
+static void
+mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
+			 int count)
+{
+	ext4_grpblk_t start, len, max;
+	int i;
+
+	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
+	for (i = 0; i < count; i++) {
+		start = get_random_u32() % max;
+		len = get_random_u32() % max;
+		len = min(len, max - start);
+
+		ranges[i].start = start + i * max;
+		ranges[i].len = len;
+	}
+}
+
+static void
+validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
+			    ext4_group_t goal_group, ext4_grpblk_t start,
+			    ext4_grpblk_t len)
+{
+	void *bitmap;
+	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
+	ext4_group_t i;
+
+	for (i = 0; i < ext4_get_groups_count(sb); i++) {
+		if (i == goal_group)
+			continue;
+
+		bitmap = mbt_ctx_bitmap(sb, i);
+		bit = mb_find_next_zero_bit(bitmap, max, 0);
+		KUNIT_ASSERT_EQ_MSG(test, bit, max,
+				    "free block on unexpected group %d", i);
+	}
+
+	bitmap = mbt_ctx_bitmap(sb, goal_group);
+	bit = mb_find_next_zero_bit(bitmap, max, 0);
+	KUNIT_ASSERT_EQ(test, bit, start);
+
+	bit = mb_find_next_bit(bitmap, max, bit + 1);
+	KUNIT_ASSERT_EQ(test, bit, start + len);
+}
+
+static void
+test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
+			      ext4_grpblk_t start, ext4_grpblk_t len)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct inode *inode;
+	ext4_fsblk_t block;
+
+	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+	if (!inode)
+		return;
+	inode->i_sb = sb;
+
+	if (len == 0)
+		return;
+
+	block = ext4_group_first_block_no(sb, goal_group) +
+		EXT4_C2B(sbi, start);
+	ext4_free_blocks_simple(inode, block, len);
+	validate_free_blocks_simple(test, sb, goal_group, start, len);
+	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+}
+
+static void test_free_blocks_simple(struct kunit *test)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+	ext4_group_t i;
+	struct test_range ranges[TEST_RANGE_COUNT];
+
+	for (i = 0; i < ext4_get_groups_count(sb); i++)
+		mbt_ctx_mark_used(sb, i, 0, max);
+
+	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+	for (i = 0; i < TEST_RANGE_COUNT; i++)
+		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
+					      ranges[i].start, ranges[i].len);
+}
+
+static void
+test_mark_diskspace_used_range(struct kunit *test,
+			       struct ext4_allocation_context *ac,
+			       ext4_grpblk_t start,
+			       ext4_grpblk_t len)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	int ret;
+	void *bitmap;
+	ext4_grpblk_t i, max;
+
+	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
+	if (len == 0)
+		return;
+
+	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
+	ac->ac_b_ex.fe_start = start;
+	ac->ac_b_ex.fe_len = len;
+
+	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
+	memset(bitmap, 0, sb->s_blocksize);
+	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	max = EXT4_CLUSTERS_PER_GROUP(sb);
+	i = mb_find_next_bit(bitmap, max, 0);
+	KUNIT_ASSERT_EQ(test, i, start);
+	i = mb_find_next_zero_bit(bitmap, max, i + 1);
+	KUNIT_ASSERT_EQ(test, i, start + len);
+	i = mb_find_next_bit(bitmap, max, i + 1);
+	KUNIT_ASSERT_EQ(test, max, i);
+}
+
+static void test_mark_diskspace_used(struct kunit *test)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	struct inode *inode;
+	struct ext4_allocation_context ac;
+	struct test_range ranges[TEST_RANGE_COUNT];
+	int i;
+
+	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+
+	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+	if (!inode)
+		return;
+	inode->i_sb = sb;
+
+	ac.ac_status = AC_STATUS_FOUND;
+	ac.ac_sb = sb;
+	ac.ac_inode = inode;
+	for (i = 0; i < TEST_RANGE_COUNT; i++)
+		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
+					       ranges[i].len);
+}
+
+static void mbt_generate_buddy(struct super_block *sb, void *buddy,
+			       void *bitmap, struct ext4_group_info *grp)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	uint32_t order, off;
+	void *bb, *bb_h;
+	int max;
+
+	memset(buddy, 0xff, sb->s_blocksize);
+	memset(grp, 0, offsetof(struct ext4_group_info,
+				bb_counters[MB_NUM_ORDERS(sb)]));
+
+	bb = bitmap;
+	max = EXT4_CLUSTERS_PER_GROUP(sb);
+	bb_h = buddy + sbi->s_mb_offsets[1];
+
+	off = mb_find_next_zero_bit(bb, max, 0);
+	grp->bb_first_free = off;
+	while (off < max) {
+		grp->bb_counters[0]++;
+		grp->bb_free++;
+
+		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
+			grp->bb_free++;
+			grp->bb_counters[0]--;
+			mb_clear_bit(off >> 1, bb_h);
+			grp->bb_counters[1]++;
+			grp->bb_largest_free_order = 1;
+			off++;
+		}
+
+		off = mb_find_next_zero_bit(bb, max, off + 1);
+	}
+
+	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
+		bb = buddy + sbi->s_mb_offsets[order];
+		bb_h = buddy + sbi->s_mb_offsets[order + 1];
+		max = max >> 1;
+		off = mb_find_next_zero_bit(bb, max, 0);
+
+		while (off < max) {
+			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
+				mb_set_bits(bb, off, 2);
+				grp->bb_counters[order] -= 2;
+				mb_clear_bit(off >> 1, bb_h);
+				grp->bb_counters[order + 1]++;
+				grp->bb_largest_free_order = order + 1;
+				off++;
+			}
+
+			off = mb_find_next_zero_bit(bb, max, off + 1);
+		}
+	}
+
+	max = EXT4_CLUSTERS_PER_GROUP(sb);
+	off = mb_find_next_zero_bit(bitmap, max, 0);
+	while (off < max) {
+		grp->bb_fragments++;
+
+		off = mb_find_next_bit(bitmap, max, off + 1);
+		if (off + 1 >= max)
+			break;
+
+		off = mb_find_next_zero_bit(bitmap, max, off + 1);
+	}
+}
+
+static void
+mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
+			struct ext4_group_info *grp2)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	int i;
+
+	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
+			grp2->bb_first_free);
+	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
+			grp2->bb_fragments);
+	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
+	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
+			grp2->bb_largest_free_order);
+
+	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
+		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
+				    grp2->bb_counters[i],
+				    "bb_counters[%d] diffs, expected %d, generated %d",
+				    i, grp1->bb_counters[i],
+				    grp2->bb_counters[i]);
+	}
+}
+
+static void
+do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
+		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
+		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
+{
+	int i;
+
+	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);
+
+	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+		ext4_grp->bb_counters[i] = 0;
+	/* needed by validation in ext4_mb_generate_buddy */
+	ext4_grp->bb_free = mbt_grp->bb_free;
+	memset(ext4_buddy, 0xff, sb->s_blocksize);
+	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
+			       ext4_grp);
+
+	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
+			0);
+	mbt_validate_group_info(test, mbt_grp, ext4_grp);
+}
+
+static void test_mb_generate_buddy(struct kunit *test)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	void *bitmap, *expected_bb, *generate_bb;
+	struct ext4_group_info *expected_grp, *generate_grp;
+	struct test_range ranges[TEST_RANGE_COUNT];
+	int i;
+
+	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
+	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
+	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
+	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
+	KUNIT_ASSERT_NOT_NULL(test, generate_grp);
+
+	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+	for (i = 0; i < TEST_RANGE_COUNT; i++) {
+		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
+		do_test_generate_buddy(test, sb, bitmap, expected_bb,
+				       expected_grp, generate_bb, generate_grp);
+	}
+}
+
+static void
+test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
+			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
+			void *buddy, struct ext4_group_info *grp)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	struct ext4_free_extent ex;
+	int i;
+
+	/* mb_mark_used only accepts non-zero len */
+	if (len == 0)
+		return;
+
+	ex.fe_start = start;
+	ex.fe_len = len;
+	ex.fe_group = TEST_GOAL_GROUP;
+
+	ext4_lock_group(sb, TEST_GOAL_GROUP);
+	mb_mark_used(e4b, &ex);
+	ext4_unlock_group(sb, TEST_GOAL_GROUP);
+
+	mb_set_bits(bitmap, start, len);
+	/* bypass bb_free validation in ext4_mb_generate_buddy */
+	grp->bb_free -= len;
+	memset(buddy, 0xff, sb->s_blocksize);
+	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+		grp->bb_counters[i] = 0;
+	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
+
+	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+			0);
+	mbt_validate_group_info(test, grp, e4b->bd_info);
+}
+
+static void test_mb_mark_used(struct kunit *test)
+{
+	struct ext4_buddy e4b;
+	struct super_block *sb = (struct super_block *)test->priv;
+	void *bitmap, *buddy;
+	struct ext4_group_info *grp;
+	int ret;
+	struct test_range ranges[TEST_RANGE_COUNT];
+	int i;
+
+	/* buddy cache assumes that each page contains at least one block */
+	if (sb->s_blocksize > PAGE_SIZE)
+		kunit_skip(test, "blocksize exceeds pagesize");
+
+	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
+	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+
+	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
+	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+	for (i = 0; i < TEST_RANGE_COUNT; i++)
+		test_mb_mark_used_range(test, &e4b, ranges[i].start,
+					ranges[i].len, bitmap, buddy, grp);
+
+	ext4_mb_unload_buddy(&e4b);
+}
+
+static void
+test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
+			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
+			  void *buddy, struct ext4_group_info *grp)
+{
+	struct super_block *sb = (struct super_block *)test->priv;
+	int i;
+
+	/* mb_free_blocks will WARN if len is 0 */
+	if (len == 0)
+		return;
+
+	ext4_lock_group(sb, e4b->bd_group);
+	mb_free_blocks(NULL, e4b, start, len);
+	ext4_unlock_group(sb, e4b->bd_group);
+
+	mb_clear_bits(bitmap, start, len);
+	/* bypass bb_free validation in ext4_mb_generate_buddy */
+	grp->bb_free += len;
+	memset(buddy, 0xff, sb->s_blocksize);
+	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+		grp->bb_counters[i] = 0;
+	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
+
+	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+			0);
+	mbt_validate_group_info(test, grp, e4b->bd_info);
+}
+
+static void test_mb_free_blocks(struct kunit *test)
+{
+	struct ext4_buddy e4b;
+	struct super_block *sb = (struct super_block *)test->priv;
+	void *bitmap, *buddy;
+	struct ext4_group_info *grp;
+	struct ext4_free_extent ex;
+	int ret;
+	int i;
+	struct test_range ranges[TEST_RANGE_COUNT];
+
+	/* buddy cache assumes that each page contains at least one block */
+	if (sb->s_blocksize > PAGE_SIZE)
+		kunit_skip(test, "blocksize exceeds pagesize");
+
+	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
+	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+
+	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ex.fe_start = 0;
+	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
+	ex.fe_group = TEST_GOAL_GROUP;
+
+	ext4_lock_group(sb, TEST_GOAL_GROUP);
+	mb_mark_used(&e4b, &ex);
+	ext4_unlock_group(sb, TEST_GOAL_GROUP);
+
+	grp->bb_free = 0;
+	memset(bitmap, 0xff, sb->s_blocksize);
+
+	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+	for (i = 0; i < TEST_RANGE_COUNT; i++)
+		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
+					  ranges[i].len, bitmap, buddy, grp);
+
+	ext4_mb_unload_buddy(&e4b);
+}
+
 static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
 	{
 		.blocksize_bits = 10,
@@ -334,6 +896,11 @@ KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

 static struct kunit_case mbt_test_cases[] = {
 	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
+	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
+	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
+	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
+	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
+	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
 	{}
 };
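Note: each new test is registered with KUNIT_CASE_PARAM against the same layout generator, so every case runs once per entry of mbt_test_layouts. A sketch of how the suite ties together; the suite definition below is assumed from context rather than visible in this diff:

/* Sketch: parameterized suite wiring (suite struct assumed, not shown above). */
static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,		/* builds the fake sb for each layout */
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};
kunit_test_suites(&mbt_test_suite);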
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3015,8 +3015,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 {
 	struct super_block *sb = pde_data(file_inode(seq->file));
 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
-	int i;
-	int err, buddy_loaded = 0;
+	int i, err;
+	char nbuf[16];
 	struct ext4_buddy e4b;
 	struct ext4_group_info *grinfo;
 	unsigned char blocksize_bits = min_t(unsigned char,
@@ -3043,23 +3043,26 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
 		err = ext4_mb_load_buddy(sb, group, &e4b);
 		if (err) {
-			seq_printf(seq, "#%-5u: I/O error\n", group);
+			seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf));
 			return 0;
 		}
-		buddy_loaded = 1;
+		ext4_mb_unload_buddy(&e4b);
 	}

+	/*
+	 * We care only about free space counters in the group info and
+	 * these are safe to access even after the buddy has been unloaded
+	 */
 	memcpy(&sg, grinfo, i);
-
-	if (buddy_loaded)
-		ext4_mb_unload_buddy(&e4b);
-
 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
 			sg.info.bb_fragments, sg.info.bb_first_free);
 	for (i = 0; i <= 13; i++)
 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
 				sg.info.bb_counters[i] : 0);
-	seq_puts(seq, " ]\n");
+	seq_puts(seq, " ]");
+	if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+		seq_puts(seq, " Block bitmap corrupted!");
+	seq_puts(seq, "\n");

 	return 0;
 }
@@ -3829,8 +3832,7 @@ void ext4_mb_release(struct super_block *sb)
 }

 static inline int ext4_issue_discard(struct super_block *sb,
-		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
-		struct bio **biop)
+		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
 {
 	ext4_fsblk_t discard_block;

@@ -3839,13 +3841,8 @@ static inline int ext4_issue_discard(struct super_block *sb,
 	count = EXT4_C2B(EXT4_SB(sb), count);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	if (biop) {
-		return __blkdev_issue_discard(sb->s_bdev,
-			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
-			(sector_t)count << (sb->s_blocksize_bits - 9),
-			GFP_NOFS, biop);
-	} else
-		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }

 static void ext4_free_data_in_buddy(struct super_block *sb,
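Note: with the unused biop parameter gone, every caller takes the synchronous sb_issue_discard() path. The removed asynchronous branch did the block-to-sector conversion explicitly, and sb_issue_discard() performs the equivalent internally; an illustrative fragment of that arithmetic, not part of the diff:

/* Filesystem blocks -> 512-byte sectors: shift by (blocksize_bits - 9). */
sector_t sector = (sector_t)discard_block << (sb->s_blocksize_bits - 9);
sector_t nr_sects = (sector_t)count << (sb->s_blocksize_bits - 9);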
@@ -5169,10 +5166,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 			.fe_len = ac->ac_orig_goal_len,
 		};
 		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+		loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);

-		/* we can't allocate as much as normalizer wants.
-		 * so, found space must get proper lstart
-		 * to cover original request */
+		/*
+		 * We can't allocate as much as normalizer wants, so we try
+		 * to get proper lstart to cover the original request, except
+		 * when the goal doesn't cover the original request as below:
+		 *
+		 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
+		 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
+		 */
 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

@@ -5184,7 +5187,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 		 * 1. Check if best ex can be kept at end of goal (before
 		 *    cr_best_avail trimmed it) and still cover original start
 		 * 2. Else, check if best ex can be kept at start of goal and
-		 *    still cover original start
+		 *    still cover original end
 		 * 3. Else, keep the best ex at start of original request.
 		 */
 		ex.fe_len = ac->ac_b_ex.fe_len;
@@ -5194,7 +5197,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 			goto adjust_bex;

 		ex.fe_logical = ac->ac_g_ex.fe_logical;
-		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+		if (o_ex_end <= extent_logical_end(sbi, &ex))
 			goto adjust_bex;

 		ex.fe_logical = ac->ac_o_ex.fe_logical;
@@ -5202,7 +5205,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 		ac->ac_b_ex.fe_logical = ex.fe_logical;

 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
-		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
 		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
 	}
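Note: the numbers in the new comment can be checked by hand, and they show exactly the exception case that motivated dropping the fe_len BUG_ON: the normalized goal 0/2048 does not even cover the original end (2045 + 10 = 2055 > 2048), yet keeping the 200-cluster best extent at the end of the goal (lstart = 2048 - 200 = 1848) still covers the original start. A standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	long goal_end = 2048, best_len = 200;
	long orig_start = 2045, orig_len = 10;
	long adjusted_lstart = goal_end - best_len;	/* 1848 */

	assert(adjusted_lstart == 1848);
	assert(orig_start >= adjusted_lstart);		/* original start covered */
	assert(orig_start + orig_len > goal_end);	/* original end is not: 2055 > 2048 */
	return 0;
}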
@@ -6487,8 +6489,14 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
 	} else {
 		if (test_opt(sb, DISCARD)) {
 			err = ext4_issue_discard(sb, block_group, bit,
-						 count_clusters, NULL);
-			if (err && err != -EOPNOTSUPP)
+						 count_clusters);
+			/*
+			 * Ignore EOPNOTSUPP error. This is consistent with
+			 * what happens when using journal.
+			 */
+			if (err == -EOPNOTSUPP)
+				err = 0;
+			if (err)
 				ext4_msg(sb, KERN_WARNING, "discard request in"
 					 " group:%u block:%d count:%lu failed"
 					 " with %d", block_group, bit, count,
@@ -6738,7 +6746,7 @@ __acquires(bitlock)
 	 */
 	mb_mark_used(e4b, &ex);
 	ext4_unlock_group(sb, group);
-	ret = ext4_issue_discard(sb, group, start, count, NULL);
+	ret = ext4_issue_discard(sb, group, start, count);
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
 	return ret;
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1602,7 +1602,8 @@ static int ext4_flex_group_add(struct super_block *sb,
 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
 		int gdb_num_end = ((group + flex_gd->count - 1) /
 				   EXT4_DESC_PER_BLOCK(sb));
-		int meta_bg = ext4_has_feature_meta_bg(sb);
+		int meta_bg = ext4_has_feature_meta_bg(sb) &&
+			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
 		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
 					  ext4_group_first_block_no(sb, 0);

@@ -2084,7 +2085,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
 		}
 	}

-	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
+	if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) || n_blocks_count == o_blocks_count) {
 		err = ext4_convert_meta_bg(sb, resize_inode);
 		if (err)
 			goto out;
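Note: the resize fix only treats a group as meta_bg once its descriptor would actually live at or past s_first_meta_bg; descriptors of earlier groups still sit in the traditional GDT even when the feature bit is set. A standalone sketch of that condition with illustrative numbers:

#include <assert.h>

int main(void)
{
	unsigned int desc_per_block = 128;	/* 4K block / 32-byte descriptor */
	unsigned int first_meta_bg = 3;		/* le32 value from the superblock */
	unsigned int group = 300;
	unsigned int gdb_num = group / desc_per_block;	/* 2 */

	/* group 300's descriptor block is still in the regular GDT */
	assert(!(gdb_num >= first_meta_bg));
	return 0;
}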
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4421,22 +4421,6 @@ static int ext4_handle_clustersize(struct super_block *sb)
 		}
 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
 			le32_to_cpu(es->s_log_block_size);
-		sbi->s_clusters_per_group =
-			le32_to_cpu(es->s_clusters_per_group);
-		if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
-			ext4_msg(sb, KERN_ERR,
-				 "#clusters per group too big: %lu",
-				 sbi->s_clusters_per_group);
-			return -EINVAL;
-		}
-		if (sbi->s_blocks_per_group !=
-		    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
-			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
-				 "clusters per group (%lu) inconsistent",
-				 sbi->s_blocks_per_group,
-				 sbi->s_clusters_per_group);
-			return -EINVAL;
-		}
 	} else {
 		if (clustersize != sb->s_blocksize) {
 			ext4_msg(sb, KERN_ERR,
@@ -4450,9 +4434,21 @@ static int ext4_handle_clustersize(struct super_block *sb)
 				 sbi->s_blocks_per_group);
 			return -EINVAL;
 		}
-		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
 		sbi->s_cluster_bits = 0;
 	}
+	sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
+	if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
+		ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
+			 sbi->s_clusters_per_group);
+		return -EINVAL;
+	}
+	if (sbi->s_blocks_per_group !=
+	    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
+		ext4_msg(sb, KERN_ERR,
+			 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
+			 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
+		return -EINVAL;
+	}
 	sbi->s_cluster_ratio = clustersize / sb->s_blocksize;

 	/* Do we have standard group size of clustersize * 8 blocks ? */
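Note: the relocated checks enforce the same invariant for bigalloc and non-bigalloc filesystems alike: blocks per group must equal clusters per group times the cluster ratio, and the cluster count must fit in one bitmap block. A standalone check with illustrative numbers:

#include <assert.h>

int main(void)
{
	unsigned long blocksize = 4096, clustersize = 65536;	/* bigalloc, ratio 16 */
	unsigned long clusters_per_group = 2048;
	unsigned long blocks_per_group =
		clusters_per_group * (clustersize / blocksize);

	assert(blocks_per_group == 32768);
	assert(clusters_per_group <= blocksize * 8);	/* fits in one bitmap block */
	return 0;
}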
@@ -6864,6 +6860,10 @@ static int ext4_write_dquot(struct dquot *dquot)
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 	ret = dquot_commit(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to commit dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;
@@ -6880,6 +6880,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 	ret = dquot_acquire(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to acquire dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;
@@ -6899,6 +6903,10 @@ static int ext4_release_dquot(struct dquot *dquot)
 		return PTR_ERR(handle);
 	}
 	ret = dquot_release(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to release dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1565,46 +1565,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 /*
  * Add value of the EA in an inode.
  */
-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
-					  const void *value, size_t value_len,
-					  struct inode **ret_inode)
+static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
+		struct inode *inode, const void *value, size_t value_len)
 {
 	struct inode *ea_inode;
 	u32 hash;
 	int err;

+	/* Account inode & space to quota even if sharing... */
+	err = ext4_xattr_inode_alloc_quota(inode, value_len);
+	if (err)
+		return ERR_PTR(err);
+
 	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
 	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
 	if (ea_inode) {
 		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
-		if (err) {
-			iput(ea_inode);
-			return err;
-		}
-
-		*ret_inode = ea_inode;
-		return 0;
+		if (err)
+			goto out_err;
+		return ea_inode;
 	}

 	/* Create an inode for the EA value */
 	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
-	if (IS_ERR(ea_inode))
-		return PTR_ERR(ea_inode);
+	if (IS_ERR(ea_inode)) {
+		ext4_xattr_inode_free_quota(inode, NULL, value_len);
+		return ea_inode;
+	}

 	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
 	if (err) {
 		if (ext4_xattr_inode_dec_ref(handle, ea_inode))
 			ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
-		iput(ea_inode);
-		return err;
+		goto out_err;
 	}

 	if (EA_INODE_CACHE(inode))
 		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
 				      ea_inode->i_ino, true /* reusable */);
-
-	*ret_inode = ea_inode;
-	return 0;
+	return ea_inode;
+out_err:
+	iput(ea_inode);
+	ext4_xattr_inode_free_quota(inode, NULL, value_len);
+	return ERR_PTR(err);
 }

 /*
@@ -1712,16 +1715,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 	if (i->value && in_inode) {
 		WARN_ON_ONCE(!i->value_len);

-		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
-		if (ret)
-			goto out;
-
-		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
-						     i->value_len,
-						     &new_ea_inode);
-		if (ret) {
+		new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+					i->value, i->value_len);
+		if (IS_ERR(new_ea_inode)) {
+			ret = PTR_ERR(new_ea_inode);
 			new_ea_inode = NULL;
-			ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
 			goto out;
 		}
 	}
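Note: across these two hunks the quota ownership rule becomes local to ext4_xattr_inode_lookup_create(): quota is charged on entry and released on every internal failure path, and the result is reported ERR_PTR-style. A hedged sketch of the resulting caller contract; the wrapper below is hypothetical, for illustration only:

/*
 * Sketch: after this change a caller never unwinds quota itself on
 * lookup/create failure; it only frees quota if it later discards a
 * successfully returned ea_inode.
 */
static struct inode *get_ea_inode(handle_t *handle, struct inode *inode,
				  const void *value, size_t len)
{
	struct inode *ea_inode =
		ext4_xattr_inode_lookup_create(handle, inode, value, len);

	if (IS_ERR(ea_inode))
		return NULL;	/* quota already unwound internally */
	return ea_inode;
}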
@@ -2160,17 +2158,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 					   ENTRY(header(s->base)+1));
 			if (error)
 				goto getblk_failed;
-			if (ea_inode) {
-				/* Drop the extra ref on ea_inode. */
-				error = ext4_xattr_inode_dec_ref(handle,
-								 ea_inode);
-				if (error)
-					ext4_warning_inode(ea_inode,
-							   "dec ref error=%d",
-							   error);
-				iput(ea_inode);
-				ea_inode = NULL;
-			}

 			lock_buffer(new_bh);
 			error = ext4_journal_get_create_access(handle, sb,