Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-09 22:50:41 +00:00
bcachefs: Add error messages for memory allocation failures
This adds some missing diagnostics from rare, but annoying-to-debug, runtime allocation failure paths.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
parent 5ba2fd1145
commit f0f41a6d74
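Every hunk below applies the same pattern: when a runtime allocation fails, log a bch_err() diagnostic naming what was being allocated and how big it was before returning -ENOMEM; several helpers gain a struct bch_fs * parameter solely so they have the context bch_err() needs. What follows is a minimal userspace sketch of that pattern, assuming a stand-in bch_err() macro, a fs_ctx struct in place of struct bch_fs, and a hypothetical grow_table() helper modeled on inode_walker_realloc()/add_nlink(); it is an illustration, not the kernel code.

/* Sketch: report allocation failures with context instead of a bare -ENOMEM. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's bch_err(); prints to stderr instead of the fs log. */
#define bch_err(c, fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", (c)->name, ##__VA_ARGS__)

struct fs_ctx { const char *name; };		/* stands in for struct bch_fs */
struct table  { size_t nr, size; int *d; };	/* growable array, like nlink_table */

/* Like inode_walker_realloc()/add_nlink() after this commit: the context
 * pointer is threaded in purely so the failure path can say what failed. */
static int grow_table(struct fs_ctx *c, struct table *t)
{
	if (t->nr == t->size) {
		size_t new_size = t->size ? t->size * 2 : 8;
		void *d = realloc(t->d, new_size * sizeof(t->d[0]));

		if (!d) {
			bch_err(c, "error allocating memory for table, size %zu",
				new_size);
			return -ENOMEM;
		}
		t->d = d;
		t->size = new_size;
	}
	return 0;
}

int main(void)
{
	struct fs_ctx c = { "sketch" };
	struct table t = { 0 };
	int ret = grow_table(&c, &t);

	if (!ret)
		t.d[t.nr++] = 42;
	free(t.d);
	return ret ? 1 : 0;
}

The value of the pattern is that the message carries the size or index that failed, so a rare failure in the field can be diagnosed from the log alone, without having to reproduce it.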
fs/bcachefs/btree_key_cache.c

@@ -147,19 +147,23 @@ bkey_cached_reuse(struct btree_key_cache *c)
 }
 
 static struct bkey_cached *
-btree_key_cache_create(struct btree_key_cache *c,
+btree_key_cache_create(struct bch_fs *c,
 		       enum btree_id btree_id,
 		       struct bpos pos)
 {
+	struct btree_key_cache *bc = &c->btree_key_cache;
 	struct bkey_cached *ck;
 	bool was_new = true;
 
-	ck = bkey_cached_alloc(c);
+	ck = bkey_cached_alloc(bc);
 
 	if (unlikely(!ck)) {
-		ck = bkey_cached_reuse(c);
-		if (unlikely(!ck))
+		ck = bkey_cached_reuse(bc);
+		if (unlikely(!ck)) {
+			bch_err(c, "error allocating memory for key cache item, btree %s",
+				bch2_btree_ids[btree_id]);
 			return ERR_PTR(-ENOMEM);
+		}
 
 		was_new = false;
 	}
@@ -176,7 +180,7 @@ btree_key_cache_create(struct btree_key_cache *c,
 	ck->valid = false;
 	ck->flags = 1U << BKEY_CACHED_ACCESSED;
 
-	if (unlikely(rhashtable_lookup_insert_fast(&c->table,
+	if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
 						   &ck->hash,
 						   bch2_btree_key_cache_params))) {
 		/* We raced with another fill: */
@@ -186,15 +190,15 @@ btree_key_cache_create(struct btree_key_cache *c,
 			six_unlock_intent(&ck->c.lock);
 			kfree(ck);
 		} else {
-			mutex_lock(&c->lock);
-			bkey_cached_free(c, ck);
-			mutex_unlock(&c->lock);
+			mutex_lock(&bc->lock);
+			bkey_cached_free(bc, ck);
+			mutex_unlock(&bc->lock);
 		}
 
 		return NULL;
 	}
 
-	atomic_long_inc(&c->nr_keys);
+	atomic_long_inc(&bc->nr_keys);
 
 	six_unlock_write(&ck->c.lock);
 
@@ -205,6 +209,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 				struct btree_path *ck_path,
 				struct bkey_cached *ck)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	unsigned new_u64s = 0;
@@ -234,6 +239,8 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		new_u64s = roundup_pow_of_two(new_u64s);
 		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
 		if (!new_k) {
+			bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+				bch2_btree_ids[ck->key.btree_id], new_u64s);
 			ret = -ENOMEM;
 			goto err;
 		}
@@ -294,8 +301,7 @@ retry:
 		return 0;
 	}
 
-	ck = btree_key_cache_create(&c->btree_key_cache,
-				    path->btree_id, path->pos);
+	ck = btree_key_cache_create(c, path->btree_id, path->pos);
 	ret = PTR_ERR_OR_ZERO(ck);
 	if (ret)
 		goto err;
fs/bcachefs/btree_update_leaf.c

@@ -308,6 +308,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 			    struct btree_path *path,
 			    unsigned u64s)
 {
+	struct bch_fs *c = trans->c;
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	unsigned new_u64s;
 	struct bkey_i *new_k;
@@ -315,7 +316,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	EBUG_ON(path->level);
 
 	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
-	    bch2_btree_key_cache_must_wait(trans->c) &&
+	    bch2_btree_key_cache_must_wait(c) &&
 	    !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
 		return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
 
@@ -330,8 +331,11 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 
 	new_u64s = roundup_pow_of_two(u64s);
 	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
-	if (!new_k)
+	if (!new_k) {
+		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+			bch2_btree_ids[path->btree_id], new_u64s);
 		return -ENOMEM;
+	}
 
 	ck->u64s = new_u64s;
 	ck->k = new_k;
fs/bcachefs/buckets.c

@@ -926,9 +926,11 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 	BUG_ON(!(flags & BTREE_TRIGGER_GC));
 
 	m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
-	if (!m)
+	if (!m) {
+		bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+			(u64) p.idx);
 		return -ENOMEM;
+	}
 
 	spin_lock(&c->ec_stripes_heap_lock);
 
@@ -1039,7 +1041,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
-	size_t idx = new.k->p.offset;
+	u64 idx = new.k->p.offset;
 	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
 		? bkey_s_c_to_stripe(old).v : NULL;
 	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
@@ -1057,7 +1059,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 
 		bch2_bkey_val_to_text(&PBUF(buf1), c, old);
 		bch2_bkey_val_to_text(&PBUF(buf2), c, new);
-		bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+		bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
 				    "old %s\n"
 				    "new %s", idx, buf1, buf2);
 		bch2_inconsistent_error(c);
@@ -1089,9 +1091,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 		struct gc_stripe *m =
 			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
 
-		if (!m)
+		if (!m) {
+			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+				idx);
 			return -ENOMEM;
-
+		}
 		/*
 		 * This will be wrong when we bring back runtime gc: we should
 		 * be unmarking the old key and then marking the new key
fs/bcachefs/fsck.c

@@ -564,14 +564,17 @@ static struct inode_walker inode_walker_init(void)
 	return (struct inode_walker) { 0, };
 }
 
-static int inode_walker_realloc(struct inode_walker *w)
+static int inode_walker_realloc(struct bch_fs *c, struct inode_walker *w)
 {
 	if (w->nr == w->size) {
 		size_t new_size = max_t(size_t, 8UL, w->size * 2);
 		void *d = krealloc(w->d, new_size * sizeof(w->d[0]),
 				   GFP_KERNEL);
-		if (!d)
+		if (!d) {
+			bch_err(c, "fsck: error allocating memory for inode_walker, size %zu",
+				new_size);
 			return -ENOMEM;
+		}
 
 		w->d = d;
 		w->size = new_size;
@@ -586,7 +589,7 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
 	struct bch_inode_unpacked u;
 	int ret;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -647,7 +650,7 @@ found:
 	while (i && w->d[i - 1].snapshot > pos.snapshot)
 		--i;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -1812,7 +1815,8 @@ static bool path_is_dup(struct pathbuf *p, u64 inum, u32 snapshot)
 	return false;
 }
 
-static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
+static int path_down(struct bch_fs *c, struct pathbuf *p,
+		     u64 inum, u32 snapshot)
 {
 	if (p->nr == p->size) {
 		size_t new_size = max_t(size_t, 256UL, p->size * 2);
@@ -1820,6 +1824,8 @@ static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
 				  new_size * sizeof(p->entries[0]),
 				  GFP_KERNEL);
 		if (!n) {
+			bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
@@ -1893,7 +1899,7 @@ static int check_path(struct btree_trans *trans,
 		if (!S_ISDIR(inode->bi_mode))
 			break;
 
-		ret = path_down(p, inode->bi_inum, snapshot);
+		ret = path_down(c, p, inode->bi_inum, snapshot);
 		if (ret) {
 			bch_err(c, "memory allocation failure");
 			return ret;
@@ -1998,12 +2004,15 @@ struct nlink_table {
 	} *d;
 };
 
-static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot)
+static int add_nlink(struct bch_fs *c, struct nlink_table *t,
+		     u64 inum, u32 snapshot)
 {
 	if (t->nr == t->size) {
 		size_t new_size = max_t(size_t, 128UL, t->size * 2);
 		void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
 		if (!d) {
+			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
@@ -2093,7 +2102,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
 		if (!u.bi_nlink)
 			continue;
 
-		ret = add_nlink(t, k.k->p.offset, k.k->p.snapshot);
+		ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
 		if (ret) {
 			*end = k.k->p.offset;
 			ret = 0;