bcachefs: Better calculation of copygc threshold

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author:    Kent Overstreet, 2018-08-01 14:26:55 -04:00
Committer: Kent Overstreet
parent 6eac2c2e24
commit a9bec5208b
4 changed files with 33 additions and 24 deletions

@@ -1711,7 +1711,7 @@ void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
 void bch2_recalc_capacity(struct bch_fs *c)
 {
 	struct bch_dev *ca;
-	u64 total_capacity, capacity = 0, reserved_sectors = 0;
+	u64 capacity = 0, reserved_sectors = 0;
 	unsigned long ra_pages = 0;
 	unsigned i, j;
@@ -1726,7 +1726,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 	bch2_set_ra_pages(c, ra_pages);
 
 	for_each_rw_member(ca, c, i) {
-		size_t reserve = 0;
+		u64 dev_capacity, dev_reserve = 0;
 
 		/*
 		 * We need to reserve buckets (from the number
@@ -1745,30 +1745,40 @@ void bch2_recalc_capacity(struct bch_fs *c)
 		 * not -ENOSPC calculations.
 		 */
 		for (j = 0; j < RESERVE_NONE; j++)
-			reserve += ca->free[j].size;
+			dev_reserve += ca->free[j].size;
 
-		reserve += ca->free_inc.size;
+		dev_reserve += ca->free_inc.size;
 
-		reserve += ARRAY_SIZE(c->write_points);
+		dev_reserve += ARRAY_SIZE(c->write_points);
 
-		reserve += 1;		/* btree write point */
+		dev_reserve += 1;	/* btree write point */
+		dev_reserve += 1;	/* copygc write point */
+		dev_reserve += 1;	/* rebalance write point */
+		dev_reserve += WRITE_POINT_COUNT;
 
-		reserved_sectors += bucket_to_sector(ca, reserve);
+		dev_reserve *= ca->mi.bucket_size;
+
+		dev_reserve *= 2;
+
+		dev_capacity = bucket_to_sector(ca, ca->mi.nbuckets -
+						ca->mi.first_bucket);
+
+		ca->copygc_threshold =
+			max(div64_u64(dev_capacity *
+				      c->opts.gc_reserve_percent, 100),
+			    dev_reserve) / 2;
 
-		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
-					     ca->mi.first_bucket);
+		capacity += dev_capacity;
+		reserved_sectors += dev_reserve;
 	}
 
-	total_capacity = capacity;
-
-	capacity *= (100 - c->opts.gc_reserve_percent);
-	capacity = div64_u64(capacity, 100);
+	reserved_sectors = max(div64_u64(capacity *
+					 c->opts.gc_reserve_percent, 100),
+			       reserved_sectors);
 
-	BUG_ON(reserved_sectors > total_capacity);
+	BUG_ON(reserved_sectors > capacity);
 
-	capacity = min(capacity, total_capacity - reserved_sectors);
-
-	c->capacity = capacity;
+	c->capacity = capacity - reserved_sectors;
 
 	if (c->capacity) {
 		bch2_io_timer_add(&c->io_clock[READ],
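
The substantive change above: instead of one filesystem-wide "half the gc reserve" rule, each device now gets its own threshold, computed once here as max(gc_reserve_percent of the device's capacity, its doubled write-point reserve) / 2. A minimal standalone sketch of that arithmetic, using entirely hypothetical device numbers (in the kernel they come from ca->mi and c->opts):

/* sketch.c -- illustrative only; numbers are invented, not from the commit */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t nbuckets	    = 1 << 20;	/* buckets on the device */
	uint64_t first_bucket	    = 16;
	uint64_t bucket_size	    = 1024;	/* sectors per bucket */
	uint64_t gc_reserve_percent = 8;

	/* stand-in for the freelist/write-point bucket counts summed above: */
	uint64_t dev_reserve = 128;
	dev_reserve *= bucket_size;		/* buckets -> sectors */
	dev_reserve *= 2;

	uint64_t dev_capacity = (nbuckets - first_bucket) * bucket_size;
	uint64_t pct_reserve  = dev_capacity * gc_reserve_percent / 100;

	/* copygc starts once free space on the device drops below this: */
	uint64_t copygc_threshold =
		(pct_reserve > dev_reserve ? pct_reserve : dev_reserve) / 2;

	printf("dev_capacity:     %llu sectors\n",
	       (unsigned long long) dev_capacity);
	printf("copygc_threshold: %llu sectors\n",
	       (unsigned long long) copygc_threshold);
	return 0;
}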

@@ -427,6 +427,7 @@ struct bch_dev {
 	copygc_heap		copygc_heap;
 	struct bch_pd_controller copygc_pd;
 	struct write_point	copygc_write_point;
+	u64			copygc_threshold;
 
 	atomic64_t		rebalance_work;

@@ -228,16 +228,10 @@ static int bch2_copygc_thread(void *arg)
 		last = atomic_long_read(&clock->now);
 
-		reserve = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
-				    ca->mi.bucket_size *
-				    c->opts.gc_reserve_percent, 200);
+		reserve = ca->copygc_threshold;
 
 		usage = bch2_dev_usage_read(c, ca);
 
-		/*
-		 * don't start copygc until less than half the gc reserve is
-		 * available:
-		 */
 		available = __dev_buckets_available(ca, usage) *
 			ca->mi.bucket_size;
 
 		if (available > reserve) {
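
With the threshold precomputed in bch2_recalc_capacity(), the thread's decision collapses to one comparison, which is why the "less than half the gc reserve" comment disappears: the /2 already lives in ca->copygc_threshold. A compilable sketch of the check with simplified names (this is not the kernel's actual signature; __dev_buckets_available() is replaced by a plain parameter):

#include <stdbool.h>
#include <stdint.h>

/* Simplified model of the wakeup test in bch2_copygc_thread() */
static bool copygc_should_run(uint64_t free_buckets,
			      uint64_t bucket_size,	 /* sectors */
			      uint64_t copygc_threshold) /* sectors */
{
	uint64_t available = free_buckets * bucket_size;

	/* above the threshold: keep sleeping; at or below it: run copygc */
	return available <= copygc_threshold;
}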

@@ -788,6 +788,8 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 	       "    meta:     %llu\n"
 	       "    user:     %llu\n"
 	       "    cached:   %llu\n"
+	       "    fragmented: %llu\n"
+	       "    copygc threshold: %llu\n"
 	       "freelist_wait: %s\n"
 	       "open buckets: %u/%u (reserved %u)\n"
 	       "open_buckets_wait: %s\n",
@@ -808,6 +810,8 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 	       stats.sectors[BCH_DATA_BTREE],
 	       stats.sectors[BCH_DATA_USER],
 	       stats.sectors[BCH_DATA_CACHED],
+	       stats.sectors_fragmented,
+	       ca->copygc_threshold,
 	       c->freelist_wait.list.first ? "waiting" : "empty",
 	       c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
 	       c->open_buckets_wait.list.first ? "waiting" : "empty");
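
For reference, the two new fields land in the per-device alloc_debug file; the added lines of output would read something like this (values invented for illustration):

    fragmented: 524288
    copygc threshold: 42949017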