bcachefs: Move gc of bucket.oldest_gen to workqueue

This is a nice cleanup - and we've also been having problems with
kthread creation in the mount path.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 103304021e
parent b25fd02ab4
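The change replaces the dedicated gc kthread (created at read-write time, and thus in the mount path) with a work item that is initialized once at fs-alloc time and queued on demand. A minimal sketch of that pattern, with hypothetical my_* names and a placeholder run_gc_gens() standing in for bch2_gc_gens():

#include <linux/workqueue.h>

struct my_fs {
	struct work_struct	gens_work;
	/* ... */
};

static void run_gc_gens(struct my_fs *fs);	/* placeholder for the gc pass */

static void my_gens_work_fn(struct work_struct *work)
{
	struct my_fs *fs = container_of(work, struct my_fs, gens_work);

	/* one pass per kick; no polling loop or kthread lifecycle to manage */
	run_gc_gens(fs);
}

/* called once at allocation time; nothing here can fail */
static void my_fs_gc_init(struct my_fs *fs)
{
	INIT_WORK(&fs->gens_work, my_gens_work_fn);
}

/* called from wherever the old code would have woken the kthread */
static void my_gc_gens_async(struct my_fs *fs)
{
	queue_work(system_long_wq, &fs->gens_work);
}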
fs/bcachefs/alloc_background.c
@@ -874,7 +874,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			bch2_do_invalidates(c);
 
 		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
-			bch2_do_gc_gens(c);
+			bch2_gc_gens_async(c);
 	}
 
 	if ((flags & BTREE_TRIGGER_gc) &&
fs/bcachefs/alloc_foreground.c
@@ -541,7 +541,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 		bch2_do_discards(c);
 
 	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
-		bch2_do_gc_gens(c);
+		bch2_gc_gens_async(c);
 
 	if (should_invalidate_buckets(ca, *usage))
 		bch2_do_invalidates(c);
fs/bcachefs/bcachefs.h
@@ -718,6 +718,7 @@ struct btree_trans_buf {
 	x(discard_fast)			\
 	x(invalidate)			\
 	x(delete_dead_snapshots)	\
+	x(gc_gens)			\
 	x(snapshot_delete_pagecache)	\
 	x(sysfs)			\
 	x(btree_write_buffer)
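The x(gc_gens) entry extends an x-macro list; each entry expands into an enum constant of the form BCH_WRITE_REF_<name>, which is how the BCH_WRITE_REF_gc_gens used further down comes into existence. A simplified sketch of the expansion (not the exact bcachefs definition):

/* Simplified sketch of the x-macro pattern: the same list is expanded
 * wherever one entry per write ref is needed. */
#define WRITE_REFS()			\
	x(discard_fast)			\
	x(invalidate)			\
	x(gc_gens)

enum write_ref {
#define x(n) WRITE_REF_##n,
	WRITE_REFS()
#undef x
	WRITE_REF_NR	/* count of entries */
};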
@@ -960,8 +961,7 @@ struct bch_fs {
 	struct work_struct	discard_fast_work;
 
 	/* GARBAGE COLLECTION */
-	struct task_struct	*gc_thread;
-	atomic_t		kick_gc;
+	struct work_struct	gc_gens_work;
 	unsigned long		gc_count;
 
 	enum btree_id		gc_gens_btree;
@@ -1118,7 +1118,6 @@ struct bch_fs {
 	u64			counters_on_mount[BCH_COUNTER_NR];
 	u64 __percpu		*counters;
 
-	unsigned		btree_gc_periodic:1;
 	unsigned		copy_gc_enabled:1;
 	bool			promote_whole_extents;
 
fs/bcachefs/btree_gc.c
@@ -1669,6 +1669,9 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 	struct bkey_i *u;
 	int ret;
 
+	if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
+		return -EROFS;
+
 	percpu_down_read(&c->mark_lock);
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
@@ -1802,80 +1805,23 @@ int bch2_gc_gens(struct bch_fs *c)
 	return ret;
 }
 
-static int bch2_gc_thread(void *arg)
+static void bch2_gc_gens_work(struct work_struct *work)
 {
-	struct bch_fs *c = arg;
-	struct io_clock *clock = &c->io_clock[WRITE];
-	unsigned long last = atomic64_read(&clock->now);
-	unsigned last_kick = atomic_read(&c->kick_gc);
-
-	set_freezable();
-
-	while (1) {
-		while (1) {
-			set_current_state(TASK_INTERRUPTIBLE);
-
-			if (kthread_should_stop()) {
-				__set_current_state(TASK_RUNNING);
-				return 0;
-			}
-
-			if (atomic_read(&c->kick_gc) != last_kick)
-				break;
-
-			if (c->btree_gc_periodic) {
-				unsigned long next = last + c->capacity / 16;
-
-				if (atomic64_read(&clock->now) >= next)
-					break;
-
-				bch2_io_clock_schedule_timeout(clock, next);
-			} else {
-				schedule();
-			}
-
-			try_to_freeze();
-		}
-		__set_current_state(TASK_RUNNING);
-
-		last = atomic64_read(&clock->now);
-		last_kick = atomic_read(&c->kick_gc);
-
-		bch2_gc_gens(c);
-		debug_check_no_locks_held();
-	}
-
-	return 0;
+	struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
+	bch2_gc_gens(c);
+	bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-void bch2_gc_thread_stop(struct bch_fs *c)
+void bch2_gc_gens_async(struct bch_fs *c)
 {
-	struct task_struct *p;
-
-	p = c->gc_thread;
-	c->gc_thread = NULL;
-
-	if (p) {
-		kthread_stop(p);
-		put_task_struct(p);
-	}
+	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
+	    !queue_work(c->write_ref_wq, &c->gc_gens_work))
+		bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-int bch2_gc_thread_start(struct bch_fs *c)
+void bch2_fs_gc_init(struct bch_fs *c)
 {
-	struct task_struct *p;
+	seqcount_init(&c->gc_pos_lock);
 
-	if (c->gc_thread)
-		return 0;
-
-	p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
-	if (IS_ERR(p)) {
-		bch_err_fn(c, PTR_ERR(p));
-		return PTR_ERR(p);
-	}
-
-	get_task_struct(p);
-	c->gc_thread = p;
-	wake_up_process(p);
-	return 0;
+	INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
 }
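bch2_gc_gens_async() pins the filesystem with a write ref before queueing: the worker drops the ref when the pass completes, and if queue_work() returns false because the item was already pending, the just-taken ref is dropped on the spot. The generic shape of the pattern, with hypothetical my_* names (my_ref_tryget()/my_ref_put() stand in for bch2_write_ref_tryget()/bch2_write_ref_put(), do_pass() for bch2_gc_gens()):

#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct	*wq;
	struct work_struct	work;
};

static bool my_ref_tryget(struct my_dev *d);	/* fails once going read-only */
static void my_ref_put(struct my_dev *d);
static void do_pass(struct my_dev *d);		/* the actual background work */

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, work);

	do_pass(d);
	my_ref_put(d);	/* drop the ref taken in my_kick() */
}

static void my_kick(struct my_dev *d)
{
	/*
	 * Hold a ref for as long as the work item is outstanding so the
	 * owner can't go read-only underneath it; queue_work() returns
	 * false if the item was already queued, in which case the ref
	 * just taken must be dropped here rather than by the worker.
	 */
	if (my_ref_tryget(d) && !queue_work(d->wq, &d->work))
		my_ref_put(d);
}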
fs/bcachefs/btree_gc.h
@@ -7,9 +7,6 @@
 
 int bch2_check_topology(struct bch_fs *);
 int bch2_check_allocations(struct bch_fs *);
-int bch2_gc_gens(struct bch_fs *);
-void bch2_gc_thread_stop(struct bch_fs *);
-int bch2_gc_thread_start(struct bch_fs *);
 
 /*
  * For concurrent mark and sweep (with other index updates), we define a total
@@ -104,11 +101,8 @@ static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
 	return ret;
 }
 
-static inline void bch2_do_gc_gens(struct bch_fs *c)
-{
-	atomic_inc(&c->kick_gc);
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
-}
+int bch2_gc_gens(struct bch_fs *);
+void bch2_gc_gens_async(struct bch_fs *);
+void bch2_fs_gc_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_GC_H */
fs/bcachefs/super.c
@@ -264,7 +264,6 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 	bch2_open_buckets_stop(c, NULL, true);
 	bch2_rebalance_stop(c);
 	bch2_copygc_stop(c);
-	bch2_gc_thread_stop(c);
 	bch2_fs_ec_flush(c);
 
 	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
@@ -486,12 +485,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 	}
 #endif
 
-	ret = bch2_gc_thread_start(c);
-	if (ret) {
-		bch_err(c, "error starting gc thread");
-		return ret;
-	}
-
 	ret = bch2_journal_reclaim_start(&c->journal);
 	if (ret)
 		goto err;
@@ -780,6 +773,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	for (i = 0; i < BCH_TIME_STAT_NR; i++)
 		bch2_time_stats_init(&c->times[i]);
 
+	bch2_fs_gc_init(c);
 	bch2_fs_copygc_init(c);
 	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
 	bch2_fs_btree_iter_init_early(c);
@@ -810,8 +804,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	INIT_LIST_HEAD(&c->fsck_error_msgs);
 	mutex_init(&c->fsck_error_msgs_lock);
 
-	seqcount_init(&c->gc_pos_lock);
-
 	seqcount_init(&c->usage_lock);
 
 	sema_init(&c->io_in_flight, 128);
fs/bcachefs/sysfs.c
@@ -142,7 +142,6 @@ write_attribute(trigger_invalidates);
 write_attribute(trigger_journal_flush);
 write_attribute(prune_cache);
 write_attribute(btree_wakeup);
-rw_attribute(btree_gc_periodic);
 rw_attribute(gc_gens_pos);
 
 read_attribute(uuid);
@@ -408,8 +407,6 @@ SHOW(bch2_fs)
 	if (attr == &sysfs_btree_write_stats)
 		bch2_btree_write_stats_to_text(out, c);
 
-	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
-
 	if (attr == &sysfs_gc_gens_pos)
 		bch2_gc_gens_pos_to_text(out, c);
 
@@ -485,14 +482,6 @@ STORE(bch2_fs)
 {
 	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 
-	if (attr == &sysfs_btree_gc_periodic) {
-		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
-			?: (ssize_t) size;
-
-		wake_up_process(c->gc_thread);
-		return ret;
-	}
-
 	if (attr == &sysfs_copy_gc_enabled) {
 		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
 			?: (ssize_t) size;