bcachefs: kill bch2_dev_usage_update_m()
By using bucket_m_to_alloc() more, we can get some nice code cleanup.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit c02eb9e891 (parent fa9bb741fe)
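The pattern in this patch: rather than keeping a bucket-specific wrapper (bch2_dev_usage_update_m()) plus a private copy of the bucket-to-alloc conversion next to bch2_dev_usage_update(), the conversion helper bucket_m_to_alloc() becomes a shared inline, and call sites convert their struct bucket to struct bch_alloc_v4 before calling the one canonical bch2_dev_usage_update(). Below is a minimal, self-contained sketch of that shape; the field names and helper mirror the diff, but the types and dev_usage_update() are simplified stand-ins for illustration, not the real bcachefs definitions.

/*
 * Stand-alone sketch only: simplified stand-in types, and dev_usage_update()
 * is a placeholder for bch2_dev_usage_update().
 */
#include <stdio.h>

struct bucket {				/* in-memory gc bucket (simplified) */
	unsigned char	gen;
	unsigned char	data_type;
	unsigned int	dirty_sectors;
	unsigned int	cached_sectors;
	unsigned int	stripe;
};

struct bch_alloc_v4 {			/* canonical alloc form (simplified) */
	unsigned char	gen;
	unsigned char	data_type;
	unsigned int	dirty_sectors;
	unsigned int	cached_sectors;
	unsigned int	stripe;
};

/* One shared conversion helper, mirroring the one the diff adds: */
static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
	return (struct bch_alloc_v4) {
		.gen		= b.gen,
		.data_type	= b.data_type,
		.dirty_sectors	= b.dirty_sectors,
		.cached_sectors	= b.cached_sectors,
		.stripe		= b.stripe,
	};
}

/* Placeholder for the single accounting entry point. */
static void dev_usage_update(const struct bch_alloc_v4 *old,
			     const struct bch_alloc_v4 *new)
{
	printf("dirty_sectors %u -> %u\n", old->dirty_sectors, new->dirty_sectors);
}

int main(void)
{
	struct bucket g = { .gen = 1, .data_type = 2, .dirty_sectors = 64 };

	/* Call-site shape after the patch: snapshot, mutate, snapshot, update. */
	struct bch_alloc_v4 old = bucket_m_to_alloc(g);
	g.dirty_sectors += 8;
	struct bch_alloc_v4 new = bucket_m_to_alloc(g);

	dev_usage_update(&old, &new);	/* no struct-bucket _m wrapper needed */
	return 0;
}

In the patch itself the same shape appears in bch2_trigger_pointer(), bch2_mark_metadata_bucket() and mark_stripe_bucket(), which is what allows the private bucket_m_to_alloc() copy and bch2_dev_usage_update_m() to be deleted in the hunks below.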
@@ -39,6 +39,22 @@ static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
 	return a.gen - a.oldest_gen;
 }
 
+static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket b)
+{
+	dst->gen = b.gen;
+	dst->data_type = b.data_type;
+	dst->dirty_sectors = b.dirty_sectors;
+	dst->cached_sectors = b.cached_sectors;
+	dst->stripe = b.stripe;
+}
+
+static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
+{
+	struct bch_alloc_v4 ret = {};
+	__bucket_m_to_alloc(&ret, b);
+	return ret;
+}
+
 static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
 {
 	switch (data_type) {
@@ -75,17 +91,14 @@ static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
 	return d ? max(0, ca->mi.bucket_size - d) : 0;
 }
 
-static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
-						   u32 cached_sectors,
-						   u32 stripe,
-						   struct bch_alloc_v4 a,
-						   enum bch_data_type data_type)
+static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
+						 enum bch_data_type data_type)
 {
-	if (stripe)
+	if (a.stripe)
 		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
-	if (dirty_sectors)
+	if (a.dirty_sectors)
 		return data_type;
-	if (cached_sectors)
+	if (a.cached_sectors)
 		return BCH_DATA_cached;
 	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
 		return BCH_DATA_need_discard;
@@ -94,13 +107,6 @@ static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
 	return BCH_DATA_free;
 }
 
-static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
-						 enum bch_data_type data_type)
-{
-	return __alloc_data_type(a.dirty_sectors, a.cached_sectors,
-				 a.stripe, a, data_type);
-}
-
 static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
 {
 	a->data_type = alloc_data_type(*a, data_type);
@@ -871,40 +871,35 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca = bch2_dev_bkey_exists(c, iter->pos.inode);
-	struct bucket old_gc, gc, *b;
 	struct bkey_i_alloc_v4 *a;
-	struct bch_alloc_v4 old_convert, new;
+	struct bch_alloc_v4 old_gc, gc, old_convert, new;
 	const struct bch_alloc_v4 *old;
 	int ret;
 
 	old = bch2_alloc_to_v4(k, &old_convert);
-	new = *old;
+	gc = new = *old;
 
 	percpu_down_read(&c->mark_lock);
-	b = gc_bucket(ca, iter->pos.offset);
-	old_gc = *b;
+	__bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));
+
+	old_gc = gc;
 
 	if ((old->data_type == BCH_DATA_sb ||
 	     old->data_type == BCH_DATA_journal) &&
 	    !bch2_dev_is_online(ca)) {
-		b->data_type = old->data_type;
-		b->dirty_sectors = old->dirty_sectors;
+		gc.data_type = old->data_type;
+		gc.dirty_sectors = old->dirty_sectors;
 	}
 
 	/*
-	 * b->data_type doesn't yet include need_discard & need_gc_gen states -
+	 * gc.data_type doesn't yet include need_discard & need_gc_gen states -
 	 * fix that here:
 	 */
-	b->data_type = __alloc_data_type(b->dirty_sectors,
-					 b->cached_sectors,
-					 b->stripe,
-					 *old,
-					 b->data_type);
-	gc = *b;
+	alloc_data_type_set(&gc, gc.data_type);
 
 	if (gc.data_type != old_gc.data_type ||
 	    gc.dirty_sectors != old_gc.dirty_sectors)
-		bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
+		bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
 	percpu_up_read(&c->mark_lock);
 
 	if (fsck_err_on(new.data_type != gc.data_type, c,
@@ -318,26 +318,6 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	preempt_enable();
 }
 
-static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
-{
-	return (struct bch_alloc_v4) {
-		.gen = b.gen,
-		.data_type = b.data_type,
-		.dirty_sectors = b.dirty_sectors,
-		.cached_sectors = b.cached_sectors,
-		.stripe = b.stripe,
-	};
-}
-
-void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
-			     struct bucket *old, struct bucket *new)
-{
-	struct bch_alloc_v4 old_a = bucket_m_to_alloc(*old);
-	struct bch_alloc_v4 new_a = bucket_m_to_alloc(*new);
-
-	bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
-}
-
 static inline int __update_replicas(struct bch_fs *c,
 				    struct bch_fs_usage *fs_usage,
 				    struct bch_replicas_entry_v1 *r,
@@ -1028,7 +1008,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 		percpu_down_read(&c->mark_lock);
 		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
 		bucket_lock(g);
-		struct bucket old = *g;
+		struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
 
 		u8 bucket_data_type = g->data_type;
 		int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
@@ -1043,9 +1023,9 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 		}
 
 		g->data_type = bucket_data_type;
-		struct bucket new = *g;
+		struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
 		bucket_unlock(g);
-		bch2_dev_usage_update_m(c, ca, &old, &new);
+		bch2_dev_usage_update(c, ca, &old, &new, 0, true);
 		percpu_up_read(&c->mark_lock);
 	}
 
@@ -1336,14 +1316,13 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 			u64 b, enum bch_data_type data_type, unsigned sectors,
 			enum btree_iter_update_trigger_flags flags)
 {
-	struct bucket old, new, *g;
 	int ret = 0;
 
 	percpu_down_read(&c->mark_lock);
-	g = gc_bucket(ca, b);
+	struct bucket *g = gc_bucket(ca, b);
 
 	bucket_lock(g);
-	old = *g;
+	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
 
 	if (bch2_fs_inconsistent_on(g->data_type &&
 				    g->data_type != data_type, c,
@@ -1365,11 +1344,11 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 
 	g->data_type = data_type;
 	g->dirty_sectors += sectors;
-	new = *g;
+	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
 err:
 	bucket_unlock(g);
 	if (!ret)
-		bch2_dev_usage_update_m(c, ca, &old, &new);
+		bch2_dev_usage_update(c, ca, &old, &new, 0, true);
 	percpu_up_read(&c->mark_lock);
 	return ret;
 }
@@ -310,8 +310,6 @@ bch2_fs_usage_read_short(struct bch_fs *);
 void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
 			   const struct bch_alloc_v4 *,
 			   const struct bch_alloc_v4 *, u64, bool);
-void bch2_dev_usage_update_m(struct bch_fs *, struct bch_dev *,
-			     struct bucket *, struct bucket *);
 
 /* key/bucket marking: */
 
@@ -307,7 +307,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	struct bucket *g = gc_bucket(ca, bucket.offset);
 
 	bucket_lock(g);
-	struct bucket old = *g;
+	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
 	u8 data_type = g->data_type;
 
 	int ret = __mark_stripe_bucket(trans, s, ptr_idx, deleting, bucket,
@@ -318,10 +318,10 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 				       &g->stripe,
 				       &g->stripe_redundancy);
 	g->data_type = data_type;
-	struct bucket new = *g;
+	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
 	bucket_unlock(g);
 	if (!ret)
-		bch2_dev_usage_update_m(c, ca, &old, &new);
+		bch2_dev_usage_update(c, ca, &old, &new, 0, true);
 	percpu_up_read(&c->mark_lock);
 	return ret;
 }