bcachefs: Heap allocate printbufs
This patch changes printbufs to dynamically allocate and reallocate a buffer as needed. Stack usage has become a bit of a problem, and a major cause of that has been static size string buffers on the stack. The most involved part of this refactoring is that printbufs must now be exited with printbuf_exit().

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent 2be7b16eee
commit fa8e94faee
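Before the diff, a minimal sketch of the conversion pattern this commit applies throughout the tree. The sketch is illustrative, not a hunk from the patch: the wrapper functions example_old()/example_new() are invented for the example, while PBUF(), PRINTBUF, printbuf_exit() and bch2_bkey_val_to_text() are the real helpers touched by the diff below. Callers stop formatting into fixed-size stack buffers and instead use a heap-backed printbuf that must be freed.

/* Old pattern: a fixed-size buffer on the stack, wrapped in PBUF(). */
static void example_old(struct bch_fs *c, struct bkey_s_c k)
{
	char buf[200];

	bch2_bkey_val_to_text(&PBUF(buf), c, k);
	bch_err(c, "bad key: %s", buf);
}

/* New pattern: heap-allocated printbuf; must be released with printbuf_exit(). */
static void example_new(struct bch_fs *c, struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;

	bch2_bkey_val_to_text(&buf, c, k);
	bch_err(c, "bad key: %s", buf.buf);
	printbuf_exit(&buf);
}

Where a buffer is reused across loop iterations, the diff calls printbuf_reset() instead of declaring a new one.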
@@ -57,11 +57,12 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
 	tmp = __bch2_bkey_unpack_key(format, packed);
 
 	if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
-		char buf1[160], buf2[160];
+		struct printbuf buf1 = PRINTBUF;
+		struct printbuf buf2 = PRINTBUF;
 		char buf3[160], buf4[160];
 
-		bch2_bkey_to_text(&PBUF(buf1), unpacked);
-		bch2_bkey_to_text(&PBUF(buf2), &tmp);
+		bch2_bkey_to_text(&buf1, unpacked);
+		bch2_bkey_to_text(&buf2, &tmp);
 		bch2_to_binary(buf3, (void *) unpacked, 80);
 		bch2_to_binary(buf4, high_word(format, packed), 80);
 
@@ -72,7 +73,7 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
 		      format->bits_per_field[2],
 		      format->bits_per_field[3],
 		      format->bits_per_field[4],
-		      buf1, buf2, buf3, buf4);
+		      buf1.buf, buf2.buf, buf3, buf4);
 	}
 }
 
@@ -58,7 +58,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	struct bkey_packed *_k, *_n;
 	struct bkey uk, n;
 	struct bkey_s_c k;
-	char buf[200];
+	struct printbuf buf = PRINTBUF;
 
 	if (!i->u64s)
 		return;
@@ -69,12 +69,14 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 		_n = bkey_next(_k);
 
 		k = bkey_disassemble(b, _k, &uk);
+
+		printbuf_reset(&buf);
 		if (c)
-			bch2_bkey_val_to_text(&PBUF(buf), c, k);
+			bch2_bkey_val_to_text(&buf, c, k);
 		else
-			bch2_bkey_to_text(&PBUF(buf), k.k);
+			bch2_bkey_to_text(&buf, k.k);
 		printk(KERN_ERR "block %u key %5zu: %s\n", set,
-		       _k->_data - i->_data, buf);
+		       _k->_data - i->_data, buf.buf);
 
 		if (_n == vstruct_last(i))
 			continue;
@@ -90,6 +92,8 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 		    !bpos_cmp(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
+
+	printbuf_exit(&buf);
 }
 
 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
@@ -106,6 +110,7 @@ void bch2_dump_btree_node_iter(struct btree *b,
 			       struct btree_node_iter *iter)
 {
 	struct btree_node_iter_set *set;
+	struct printbuf buf = PRINTBUF;
 
 	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
 	       __btree_node_iter_used(iter), b->nsets);
@@ -114,12 +119,14 @@ void bch2_dump_btree_node_iter(struct btree *b,
 		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
 		struct bset_tree *t = bch2_bkey_to_bset(b, k);
 		struct bkey uk = bkey_unpack_key(b, k);
-		char buf[100];
 
-		bch2_bkey_to_text(&PBUF(buf), &uk);
+		printbuf_reset(&buf);
+		bch2_bkey_to_text(&buf, &uk);
 		printk(KERN_ERR "set %zu key %u: %s\n",
-		       t - b->set, set->k, buf);
+		       t - b->set, set->k, buf.buf);
 	}
+
+	printbuf_exit(&buf);
 }
 
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -155,13 +162,14 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
 		struct btree_node_iter_set *set;
 		struct bkey ku = bkey_unpack_key(b, k);
 		struct bkey nu = bkey_unpack_key(b, n);
-		char buf1[80], buf2[80];
+		struct printbuf buf1 = PRINTBUF;
+		struct printbuf buf2 = PRINTBUF;
 
 		bch2_dump_btree_node(NULL, b);
-		bch2_bkey_to_text(&PBUF(buf1), &ku);
-		bch2_bkey_to_text(&PBUF(buf2), &nu);
+		bch2_bkey_to_text(&buf1, &ku);
+		bch2_bkey_to_text(&buf2, &nu);
 		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
-		       buf1, buf2);
+		       buf1.buf, buf2.buf);
 		printk(KERN_ERR "iter was:");
 
 		btree_node_iter_for_each(_iter, set) {
@@ -226,6 +234,8 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 	struct bset_tree *t = bch2_bkey_to_bset(b, where);
 	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
 	struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+	struct printbuf buf1 = PRINTBUF;
+	struct printbuf buf2 = PRINTBUF;
 #if 0
 	BUG_ON(prev &&
 	       bkey_iter_cmp(b, prev, insert) > 0);
@@ -234,17 +244,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 	    bkey_iter_cmp(b, prev, insert) > 0) {
 		struct bkey k1 = bkey_unpack_key(b, prev);
 		struct bkey k2 = bkey_unpack_key(b, insert);
-		char buf1[100];
-		char buf2[100];
 
 		bch2_dump_btree_node(NULL, b);
-		bch2_bkey_to_text(&PBUF(buf1), &k1);
-		bch2_bkey_to_text(&PBUF(buf2), &k2);
+		bch2_bkey_to_text(&buf1, &k1);
+		bch2_bkey_to_text(&buf2, &k2);
 
 		panic("prev > insert:\n"
 		      "prev key %s\n"
 		      "insert key %s\n",
-		      buf1, buf2);
+		      buf1.buf, buf2.buf);
 	}
 #endif
 #if 0
@@ -255,17 +263,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 	    bkey_iter_cmp(b, insert, next) > 0) {
 		struct bkey k1 = bkey_unpack_key(b, insert);
 		struct bkey k2 = bkey_unpack_key(b, next);
-		char buf1[100];
-		char buf2[100];
 
 		bch2_dump_btree_node(NULL, b);
-		bch2_bkey_to_text(&PBUF(buf1), &k1);
-		bch2_bkey_to_text(&PBUF(buf2), &k2);
+		bch2_bkey_to_text(&buf1, &k1);
+		bch2_bkey_to_text(&buf2, &k2);
 
 		panic("insert > next:\n"
 		      "insert key %s\n"
 		      "next key %s\n",
-		      buf1, buf2);
+		      buf1.buf, buf2.buf);
 	}
 #endif
 }
@@ -1555,9 +1561,6 @@ void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
 	struct bkey uk;
 	unsigned j, inorder;
 
-	if (out->pos != out->end)
-		*out->pos = '\0';
-
 	if (!bset_has_ro_aux_tree(t))
 		return;
 
@@ -742,14 +742,16 @@ static int lock_node_check_fn(struct six_lock *lock, void *p)
 
 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
-	char buf1[200], buf2[100], buf3[100];
+	struct printbuf buf1 = PRINTBUF;
+	struct printbuf buf2 = PRINTBUF;
+	struct printbuf buf3 = PRINTBUF;
 
 	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
 		return;
 
-	bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&b->key));
-	bch2_bpos_to_text(&PBUF(buf2), b->data->min_key);
-	bch2_bpos_to_text(&PBUF(buf3), b->data->max_key);
+	bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&b->key));
+	bch2_bpos_to_text(&buf2, b->data->min_key);
+	bch2_bpos_to_text(&buf3, b->data->max_key);
 
 	bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
 			     "btree %s level %u\n"
@@ -757,10 +759,14 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 			     "header: btree %s level %llu\n"
 			     "min %s max %s\n",
 			     bch2_btree_ids[b->c.btree_id], b->c.level,
-			     buf1,
+			     buf1.buf,
 			     bch2_btree_ids[BTREE_NODE_ID(b->data)],
 			     BTREE_NODE_LEVEL(b->data),
-			     buf2, buf3);
+			     buf2.buf, buf3.buf);
+
+	printbuf_exit(&buf3);
+	printbuf_exit(&buf2);
+	printbuf_exit(&buf1);
 }
 
 static inline void btree_check_header(struct bch_fs *c, struct btree *b)
@ -70,23 +70,23 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
struct bpos expected_start = bkey_deleted(&prev->k->k)
|
||||
? node_start
|
||||
: bpos_successor(prev->k->k.p);
|
||||
char buf1[200], buf2[200];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
|
||||
struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
|
||||
|
||||
if (bkey_deleted(&prev->k->k)) {
|
||||
struct printbuf out = PBUF(buf1);
|
||||
pr_buf(&out, "start of node: ");
|
||||
bch2_bpos_to_text(&out, node_start);
|
||||
} else {
|
||||
bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
|
||||
}
|
||||
|
||||
if (bpos_cmp(expected_start, bp->v.min_key)) {
|
||||
bch2_topology_error(c);
|
||||
|
||||
if (bkey_deleted(&prev->k->k)) {
|
||||
pr_buf(&buf1, "start of node: ");
|
||||
bch2_bpos_to_text(&buf1, node_start);
|
||||
} else {
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
|
||||
}
|
||||
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
|
||||
|
||||
if (__fsck_err(c,
|
||||
FSCK_CAN_FIX|
|
||||
FSCK_CAN_IGNORE|
|
||||
@ -95,11 +95,11 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
" prev %s\n"
|
||||
" cur %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) &&
|
||||
buf1.buf, buf2.buf) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
return FSCK_ERR_START_TOPOLOGY_REPAIR;
|
||||
ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
|
||||
goto err;
|
||||
} else {
|
||||
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
|
||||
}
|
||||
@ -109,6 +109,12 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
|
||||
bch2_topology_error(c);
|
||||
|
||||
printbuf_reset(&buf1);
|
||||
printbuf_reset(&buf2);
|
||||
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
|
||||
bch2_bpos_to_text(&buf2, node_end);
|
||||
|
||||
if (__fsck_err(c,
|
||||
FSCK_CAN_FIX|
|
||||
FSCK_CAN_IGNORE|
|
||||
@ -117,18 +123,21 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
" %s\n"
|
||||
" expected %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
|
||||
(bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) &&
|
||||
buf1.buf, buf2.buf) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
return FSCK_ERR_START_TOPOLOGY_REPAIR;
|
||||
ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
|
||||
goto err;
|
||||
} else {
|
||||
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
|
||||
}
|
||||
}
|
||||
|
||||
bch2_bkey_buf_copy(prev, c, cur.k);
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -251,18 +260,17 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
struct bpos expected_start = !prev
|
||||
? b->data->min_key
|
||||
: bpos_successor(prev->key.k.p);
|
||||
char buf1[200], buf2[200];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (!prev) {
|
||||
struct printbuf out = PBUF(buf1);
|
||||
pr_buf(&out, "start of node: ");
|
||||
bch2_bpos_to_text(&out, b->data->min_key);
|
||||
pr_buf(&buf1, "start of node: ");
|
||||
bch2_bpos_to_text(&buf1, b->data->min_key);
|
||||
} else {
|
||||
bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
|
||||
}
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key));
|
||||
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
|
||||
|
||||
if (prev &&
|
||||
bpos_cmp(expected_start, cur->data->min_key) > 0 &&
|
||||
@ -275,8 +283,10 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
" node %s\n"
|
||||
" next %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1, buf2))
|
||||
return DROP_PREV_NODE;
|
||||
buf1.buf, buf2.buf)) {
|
||||
ret = DROP_PREV_NODE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
|
||||
bpos_predecessor(cur->data->min_key)), c,
|
||||
@ -284,7 +294,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
" node %s\n"
|
||||
" next %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1, buf2))
|
||||
buf1.buf, buf2.buf))
|
||||
ret = set_node_max(c, prev,
|
||||
bpos_predecessor(cur->data->min_key));
|
||||
} else {
|
||||
@ -296,39 +306,49 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
" prev %s\n"
|
||||
" node %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1, buf2))
|
||||
return DROP_THIS_NODE;
|
||||
buf1.buf, buf2.buf)) {
|
||||
ret = DROP_THIS_NODE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
|
||||
"btree node with incorrect min_key at btree %s level %u:\n"
|
||||
" prev %s\n"
|
||||
" node %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1, buf2))
|
||||
buf1.buf, buf2.buf))
|
||||
ret = set_node_min(c, cur, expected_start);
|
||||
}
|
||||
out:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
|
||||
struct btree *child)
|
||||
{
|
||||
char buf1[200], buf2[200];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
|
||||
bch2_bpos_to_text(&buf2, b->key.k.p);
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
|
||||
"btree node with incorrect max_key at btree %s level %u:\n"
|
||||
" %s\n"
|
||||
" expected %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1),
|
||||
(bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) {
|
||||
buf1.buf, buf2.buf)) {
|
||||
ret = set_node_max(c, child, b->key.k.p);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -339,7 +359,7 @@ static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
|
||||
struct bkey_buf prev_k, cur_k;
|
||||
struct btree *prev = NULL, *cur = NULL;
|
||||
bool have_child, dropped_children = false;
|
||||
char buf[200];
|
||||
struct printbuf buf;
|
||||
int ret = 0;
|
||||
|
||||
if (!b->c.level)
|
||||
@ -363,12 +383,15 @@ static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
|
||||
false);
|
||||
ret = PTR_ERR_OR_ZERO(cur);
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
|
||||
|
||||
if (mustfix_fsck_err_on(ret == -EIO, c,
|
||||
"Unreadable btree node at btree %s level %u:\n"
|
||||
" %s",
|
||||
bch2_btree_ids[b->c.btree_id],
|
||||
b->c.level - 1,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) {
|
||||
buf.buf)) {
|
||||
bch2_btree_node_evict(c, cur_k.k);
|
||||
ret = bch2_journal_key_delete(c, b->c.btree_id,
|
||||
b->c.level, cur_k.k->k.p);
|
||||
@ -468,12 +491,14 @@ static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
|
||||
have_child = true;
|
||||
}
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
|
||||
|
||||
if (mustfix_fsck_err_on(!have_child, c,
|
||||
"empty interior btree node at btree %s level %u\n"
|
||||
" %s",
|
||||
bch2_btree_ids[b->c.btree_id],
|
||||
b->c.level,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf)))
|
||||
b->c.level, buf.buf))
|
||||
ret = DROP_THIS_NODE;
|
||||
err:
|
||||
fsck_err:
|
||||
@ -489,6 +514,7 @@ static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
|
||||
if (!ret && dropped_children)
|
||||
goto again;
|
||||
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -524,7 +550,7 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
const union bch_extent_entry *entry;
|
||||
struct extent_ptr_decoded p = { 0 };
|
||||
bool do_update = false;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
@ -542,7 +568,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
|
||||
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
|
||||
p.ptr.gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
|
||||
if (!p.ptr.cached) {
|
||||
g->_mark.gen = p.ptr.gen;
|
||||
g->gen_valid = true;
|
||||
@ -557,7 +584,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
|
||||
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
|
||||
p.ptr.gen, g->mark.gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
|
||||
if (!p.ptr.cached) {
|
||||
g->_mark.gen = p.ptr.gen;
|
||||
g->gen_valid = true;
|
||||
@ -576,7 +604,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
|
||||
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
|
||||
p.ptr.gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
|
||||
do_update = true;
|
||||
|
||||
if (fsck_err_on(!p.ptr.cached &&
|
||||
@ -586,7 +615,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
|
||||
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
|
||||
p.ptr.gen, g->mark.gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
|
||||
do_update = true;
|
||||
|
||||
if (data_type != BCH_DATA_btree && p.ptr.gen != g->mark.gen)
|
||||
@ -599,7 +629,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
|
||||
bch2_data_types[g->mark.data_type],
|
||||
bch2_data_types[data_type],
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
|
||||
if (data_type == BCH_DATA_btree) {
|
||||
g->_mark.data_type = data_type;
|
||||
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
|
||||
@ -615,14 +646,16 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
"pointer to nonexistent stripe %llu\n"
|
||||
"while marking %s",
|
||||
(u64) p.ec.idx,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
|
||||
do_update = true;
|
||||
|
||||
if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
|
||||
"pointer does not match stripe %llu\n"
|
||||
"while marking %s",
|
||||
(u64) p.ec.idx,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
|
||||
do_update = true;
|
||||
}
|
||||
}
|
||||
@ -635,13 +668,15 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
|
||||
if (is_root) {
|
||||
bch_err(c, "cannot update btree roots yet");
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
|
||||
if (!new) {
|
||||
bch_err(c, "%s: error allocating new key", __func__);
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
bkey_reassemble(new, *k);
|
||||
@ -705,19 +740,25 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
|
||||
ret = bch2_journal_key_insert_take(c, btree_id, level, new);
|
||||
if (ret) {
|
||||
kfree(new);
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (level)
|
||||
bch2_btree_node_update_key_early(c, btree_id, level - 1, *k, new);
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, *k);
|
||||
bch_info(c, "updated %s", buf);
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(new));
|
||||
bch_info(c, "new key %s", buf);
|
||||
printbuf_reset(&buf);
|
||||
bch2_bkey_val_to_text(&buf, c, *k);
|
||||
bch_info(c, "updated %s", buf.buf);
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
|
||||
bch_info(c, "new key %s", buf.buf);
|
||||
|
||||
*k = bkey_i_to_s_c(new);
|
||||
}
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -852,7 +893,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
|
||||
struct btree_and_journal_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_buf cur, prev;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
|
||||
@ -913,7 +954,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
|
||||
" %s",
|
||||
bch2_btree_ids[b->c.btree_id],
|
||||
b->c.level - 1,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) &&
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
@ -943,6 +985,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
|
||||
bch2_bkey_buf_exit(&cur, c);
|
||||
bch2_bkey_buf_exit(&prev, c);
|
||||
bch2_btree_and_journal_iter_exit(&iter);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -956,7 +999,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
|
||||
: bch2_expensive_debug_checks ? 0
|
||||
: !btree_node_type_needs_gc(btree_id) ? 1
|
||||
: 0;
|
||||
char buf[100];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
b = c->btree_roots[btree_id].b;
|
||||
@ -965,17 +1008,19 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
|
||||
return 0;
|
||||
|
||||
six_lock_read(&b->c.lock, NULL, NULL);
|
||||
printbuf_reset(&buf);
|
||||
bch2_bpos_to_text(&buf, b->data->min_key);
|
||||
if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
|
||||
"btree root with incorrect min_key: %s",
|
||||
(bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
|
||||
"btree root with incorrect min_key: %s", buf.buf)) {
|
||||
bch_err(c, "repair unimplemented");
|
||||
ret = FSCK_ERR_EXIT;
|
||||
goto fsck_err;
|
||||
}
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bpos_to_text(&buf, b->data->max_key);
|
||||
if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
|
||||
"btree root with incorrect max_key: %s",
|
||||
(bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
|
||||
"btree root with incorrect max_key: %s", buf.buf)) {
|
||||
bch_err(c, "repair unimplemented");
|
||||
ret = FSCK_ERR_EXIT;
|
||||
goto fsck_err;
|
||||
@ -995,6 +1040,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
|
||||
|
||||
if (ret < 0)
|
||||
bch_err(c, "%s: ret %i", __func__, ret);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1131,6 +1177,7 @@ static int bch2_gc_done(struct bch_fs *c,
|
||||
bool initial, bool metadata_only)
|
||||
{
|
||||
struct bch_dev *ca = NULL;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
bool verify = !metadata_only && (!initial ||
|
||||
(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
|
||||
unsigned i, dev;
|
||||
@ -1201,16 +1248,16 @@ static int bch2_gc_done(struct bch_fs *c,
|
||||
for (i = 0; i < c->replicas.nr; i++) {
|
||||
struct bch_replicas_entry *e =
|
||||
cpu_replicas_entry(&c->replicas, i);
|
||||
char buf[80];
|
||||
|
||||
if (metadata_only &&
|
||||
(e->data_type == BCH_DATA_user ||
|
||||
e->data_type == BCH_DATA_cached))
|
||||
continue;
|
||||
|
||||
bch2_replicas_entry_to_text(&PBUF(buf), e);
|
||||
printbuf_reset(&buf);
|
||||
bch2_replicas_entry_to_text(&buf, e);
|
||||
|
||||
copy_fs_field(replicas[i], "%s", buf);
|
||||
copy_fs_field(replicas[i], "%s", buf.buf);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1225,6 +1272,7 @@ static int bch2_gc_done(struct bch_fs *c,
|
||||
bch_err(c, "%s: ret %i", __func__, ret);
|
||||
|
||||
percpu_up_write(&c->mark_lock);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1424,7 +1472,7 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
|
||||
struct bkey_s_c k;
|
||||
struct reflink_gc *r;
|
||||
size_t idx = 0;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (metadata_only)
|
||||
@ -1452,7 +1500,8 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
|
||||
"reflink key has wrong refcount:\n"
|
||||
" %s\n"
|
||||
" should be %u",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf),
|
||||
r->refcount)) {
|
||||
struct bkey_i *new;
|
||||
|
||||
@ -1481,6 +1530,7 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
|
||||
bch2_trans_iter_exit(&trans, &iter);
|
||||
c->reflink_gc_nr = 0;
|
||||
bch2_trans_exit(&trans);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1539,7 +1589,7 @@ static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
|
||||
struct bkey_s_c k;
|
||||
struct gc_stripe *m;
|
||||
const struct bch_stripe *s;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
unsigned i;
|
||||
int ret = 0;
|
||||
|
||||
@ -1565,7 +1615,8 @@ static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
|
||||
"stripe has wrong block sector count %u:\n"
|
||||
" %s\n"
|
||||
" should be %u", i,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf),
|
||||
m ? m->block_sectors[i] : 0)) {
|
||||
struct bkey_i_stripe *new;
|
||||
|
||||
@ -1589,6 +1640,8 @@ static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
|
||||
bch2_trans_iter_exit(&trans, &iter);
|
||||
|
||||
bch2_trans_exit(&trans);
|
||||
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -534,13 +534,7 @@ enum btree_validate_ret {
|
||||
#define btree_err(type, c, ca, b, i, msg, ...) \
|
||||
({ \
|
||||
__label__ out; \
|
||||
char _buf[300]; \
|
||||
char *_buf2 = _buf; \
|
||||
struct printbuf out = PBUF(_buf); \
|
||||
\
|
||||
_buf2 = kmalloc(4096, GFP_ATOMIC); \
|
||||
if (_buf2) \
|
||||
out = _PBUF(_buf2, 4986); \
|
||||
struct printbuf out = PRINTBUF; \
|
||||
\
|
||||
btree_err_msg(&out, c, ca, b, i, b->written, write); \
|
||||
pr_buf(&out, ": " msg, ##__VA_ARGS__); \
|
||||
@ -548,14 +542,13 @@ enum btree_validate_ret {
|
||||
if (type == BTREE_ERR_FIXABLE && \
|
||||
write == READ && \
|
||||
!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
|
||||
mustfix_fsck_err(c, "%s", _buf2); \
|
||||
mustfix_fsck_err(c, "%s", out.buf); \
|
||||
goto out; \
|
||||
} \
|
||||
\
|
||||
switch (write) { \
|
||||
case READ: \
|
||||
if (_buf2) \
|
||||
bch_err(c, "%s", _buf2); \
|
||||
bch_err(c, "%s", out.buf); \
|
||||
\
|
||||
switch (type) { \
|
||||
case BTREE_ERR_FIXABLE: \
|
||||
@ -576,7 +569,7 @@ enum btree_validate_ret {
|
||||
} \
|
||||
break; \
|
||||
case WRITE: \
|
||||
bch_err(c, "corrupt metadata before write: %s", _buf2); \
|
||||
bch_err(c, "corrupt metadata before write: %s", out.buf);\
|
||||
\
|
||||
if (bch2_fs_inconsistent(c)) { \
|
||||
ret = BCH_FSCK_ERRORS_NOT_FIXED; \
|
||||
@ -585,8 +578,7 @@ enum btree_validate_ret {
|
||||
break; \
|
||||
} \
|
||||
out: \
|
||||
if (_buf2 != _buf) \
|
||||
kfree(_buf2); \
|
||||
printbuf_exit(&out); \
|
||||
true; \
|
||||
})
|
||||
|
||||
@ -648,8 +640,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
{
|
||||
unsigned version = le16_to_cpu(i->version);
|
||||
const char *err;
|
||||
char buf1[100];
|
||||
char buf2[100];
|
||||
struct printbuf buf1 = PRINTBUF;
|
||||
struct printbuf buf2 = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
btree_err_on((version != BCH_BSET_VERSION_OLD &&
|
||||
@ -686,7 +678,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
BTREE_ERR_FIXABLE, c, ca, b, i,
|
||||
"bset past end of btree node")) {
|
||||
i->u64s = 0;
|
||||
return 0;
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
btree_err_on(offset && !i->u64s,
|
||||
@ -737,14 +730,17 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
|
||||
BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
|
||||
"incorrect min_key: got %s should be %s",
|
||||
(bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
|
||||
(bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
|
||||
(printbuf_reset(&buf1),
|
||||
bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
|
||||
(printbuf_reset(&buf2),
|
||||
bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
|
||||
}
|
||||
|
||||
btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
|
||||
BTREE_ERR_MUST_RETRY, c, ca, b, i,
|
||||
"incorrect max key %s",
|
||||
(bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));
|
||||
(printbuf_reset(&buf1),
|
||||
bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
|
||||
|
||||
if (write)
|
||||
compat_btree_node(b->c.level, b->c.btree_id, version,
|
||||
@ -759,7 +755,10 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
BSET_BIG_ENDIAN(i), write,
|
||||
&bn->format);
|
||||
}
|
||||
out:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -769,6 +768,8 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
|
||||
{
|
||||
unsigned version = le16_to_cpu(i->version);
|
||||
struct bkey_packed *k, *prev = NULL;
|
||||
struct printbuf buf1 = PRINTBUF;
|
||||
struct printbuf buf2 = PRINTBUF;
|
||||
bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
|
||||
BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
|
||||
int ret = 0;
|
||||
@ -807,11 +808,10 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
|
||||
(!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?:
|
||||
(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
|
||||
if (invalid) {
|
||||
char buf[160];
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
|
||||
printbuf_reset(&buf1);
|
||||
bch2_bkey_val_to_text(&buf1, c, u.s_c);
|
||||
btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
|
||||
"invalid bkey: %s\n%s", invalid, buf);
|
||||
"invalid bkey: %s\n%s", invalid, buf1.buf);
|
||||
|
||||
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
|
||||
memmove_u64s_down(k, bkey_next(k),
|
||||
@ -825,18 +825,18 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
|
||||
&b->format, k);
|
||||
|
||||
if (prev && bkey_iter_cmp(b, prev, k) > 0) {
|
||||
char buf1[80];
|
||||
char buf2[80];
|
||||
struct bkey up = bkey_unpack_key(b, prev);
|
||||
|
||||
bch2_bkey_to_text(&PBUF(buf1), &up);
|
||||
bch2_bkey_to_text(&PBUF(buf2), u.k);
|
||||
printbuf_reset(&buf1);
|
||||
bch2_bkey_to_text(&buf1, &up);
|
||||
printbuf_reset(&buf2);
|
||||
bch2_bkey_to_text(&buf2, u.k);
|
||||
|
||||
bch2_dump_bset(c, b, i, 0);
|
||||
|
||||
if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
|
||||
"keys out of order: %s > %s",
|
||||
buf1, buf2)) {
|
||||
buf1.buf, buf2.buf)) {
|
||||
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
|
||||
memmove_u64s_down(k, bkey_next(k),
|
||||
(u64 *) vstruct_end(i) - (u64 *) k);
|
||||
@ -848,6 +848,8 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
|
||||
k = bkey_next(k);
|
||||
}
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1063,11 +1065,12 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
||||
if (invalid ||
|
||||
(bch2_inject_invalid_keys &&
|
||||
!bversion_cmp(u.k->version, MAX_VERSION))) {
|
||||
char buf[160];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
|
||||
bch2_bkey_val_to_text(&buf, c, u.s_c);
|
||||
btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
|
||||
"invalid bkey %s: %s", buf, invalid);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
btree_keys_account_key_drop(&b->nr, 0, k);
|
||||
|
||||
@ -1124,8 +1127,7 @@ static void btree_node_read_work(struct work_struct *work)
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
|
||||
struct bio *bio = &rb->bio;
|
||||
struct bch_io_failures failed = { .nr = 0 };
|
||||
char buf[200];
|
||||
struct printbuf out;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
bool saw_error = false;
|
||||
bool can_retry;
|
||||
|
||||
@ -1145,10 +1147,10 @@ static void btree_node_read_work(struct work_struct *work)
|
||||
bio->bi_status = BLK_STS_REMOVED;
|
||||
}
|
||||
start:
|
||||
out = PBUF(buf);
|
||||
btree_pos_to_text(&out, c, b);
|
||||
printbuf_reset(&buf);
|
||||
btree_pos_to_text(&buf, c, b);
|
||||
bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
|
||||
bch2_blk_status_to_str(bio->bi_status), buf);
|
||||
bch2_blk_status_to_str(bio->bi_status), buf.buf);
|
||||
if (rb->have_ioref)
|
||||
percpu_ref_put(&ca->io_ref);
|
||||
rb->have_ioref = false;
|
||||
@ -1174,6 +1176,7 @@ static void btree_node_read_work(struct work_struct *work)
|
||||
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
|
||||
rb->start_time);
|
||||
bio_put(&rb->bio);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
if (saw_error && !btree_node_read_error(b))
|
||||
bch2_btree_node_rewrite_async(c, b);
|
||||
@ -1254,6 +1257,7 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
container_of(cl, struct btree_node_read_all, cl);
|
||||
struct bch_fs *c = ra->c;
|
||||
struct btree *b = ra->b;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
bool dump_bset_maps = false;
|
||||
bool have_retry = false;
|
||||
int ret = 0, best = -1, write = READ;
|
||||
@ -1297,8 +1301,6 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
fsck_err:
|
||||
if (dump_bset_maps) {
|
||||
for (i = 0; i < ra->nr; i++) {
|
||||
char buf[200];
|
||||
struct printbuf out = PBUF(buf);
|
||||
struct btree_node *bn = ra->buf[i];
|
||||
struct btree_node_entry *bne = NULL;
|
||||
unsigned offset = 0, sectors;
|
||||
@ -1307,6 +1309,8 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
if (ra->err[i])
|
||||
continue;
|
||||
|
||||
printbuf_reset(&buf);
|
||||
|
||||
while (offset < btree_sectors(c)) {
|
||||
if (!offset) {
|
||||
sectors = vstruct_sectors(bn, c->block_bits);
|
||||
@ -1317,10 +1321,10 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
sectors = vstruct_sectors(bne, c->block_bits);
|
||||
}
|
||||
|
||||
pr_buf(&out, " %u-%u", offset, offset + sectors);
|
||||
pr_buf(&buf, " %u-%u", offset, offset + sectors);
|
||||
if (bne && bch2_journal_seq_is_blacklisted(c,
|
||||
le64_to_cpu(bne->keys.journal_seq), false))
|
||||
pr_buf(&out, "*");
|
||||
pr_buf(&buf, "*");
|
||||
offset += sectors;
|
||||
}
|
||||
|
||||
@ -1328,19 +1332,19 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
bne = ra->buf[i] + (offset << 9);
|
||||
if (bne->keys.seq == bn->keys.seq) {
|
||||
if (!gap)
|
||||
pr_buf(&out, " GAP");
|
||||
pr_buf(&buf, " GAP");
|
||||
gap = true;
|
||||
|
||||
sectors = vstruct_sectors(bne, c->block_bits);
|
||||
pr_buf(&out, " %u-%u", offset, offset + sectors);
|
||||
pr_buf(&buf, " %u-%u", offset, offset + sectors);
|
||||
if (bch2_journal_seq_is_blacklisted(c,
|
||||
le64_to_cpu(bne->keys.journal_seq), false))
|
||||
pr_buf(&out, "*");
|
||||
pr_buf(&buf, "*");
|
||||
}
|
||||
offset++;
|
||||
}
|
||||
|
||||
bch_err(c, "replica %u:%s", i, buf);
|
||||
bch_err(c, "replica %u:%s", i, buf.buf);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1361,6 +1365,7 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
|
||||
|
||||
closure_debug_destroy(&ra->cl);
|
||||
kfree(ra);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
clear_btree_node_read_in_flight(b);
|
||||
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
|
||||
@ -1461,23 +1466,23 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
|
||||
struct btree_read_bio *rb;
|
||||
struct bch_dev *ca;
|
||||
struct bio *bio;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret;
|
||||
|
||||
btree_pos_to_text(&PBUF(buf), c, b);
|
||||
btree_pos_to_text(&buf, c, b);
|
||||
trace_btree_read(c, b);
|
||||
|
||||
if (bch2_verify_all_btree_replicas &&
|
||||
!btree_node_read_all_replicas(c, b, sync))
|
||||
return;
|
||||
goto out;
|
||||
|
||||
ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
|
||||
NULL, &pick);
|
||||
if (bch2_fs_fatal_err_on(ret <= 0, c,
|
||||
"btree node read error: no device to read from\n"
|
||||
" at %s", buf)) {
|
||||
" at %s", buf.buf)) {
|
||||
set_btree_node_read_error(b);
|
||||
return;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ca = bch_dev_bkey_exists(c, pick.ptr.dev);
|
||||
@ -1519,6 +1524,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
|
||||
else
|
||||
queue_work(c->io_complete_wq, &rb->work);
|
||||
}
|
||||
out:
|
||||
printbuf_exit(&buf);
|
||||
}
|
||||
|
||||
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
|
||||
|
@ -574,7 +574,9 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
|
||||
struct btree_node_iter tmp;
|
||||
bool locked;
|
||||
struct bkey_packed *p, *k;
|
||||
char buf1[100], buf2[100], buf3[100];
|
||||
struct printbuf buf1 = PRINTBUF;
|
||||
struct printbuf buf2 = PRINTBUF;
|
||||
struct printbuf buf3 = PRINTBUF;
|
||||
const char *msg;
|
||||
|
||||
if (!bch2_debug_check_iterators)
|
||||
@ -622,26 +624,27 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
|
||||
btree_node_unlock(path, level);
|
||||
return;
|
||||
err:
|
||||
strcpy(buf2, "(none)");
|
||||
strcpy(buf3, "(none)");
|
||||
|
||||
bch2_bpos_to_text(&PBUF(buf1), path->pos);
|
||||
bch2_bpos_to_text(&buf1, path->pos);
|
||||
|
||||
if (p) {
|
||||
struct bkey uk = bkey_unpack_key(l->b, p);
|
||||
bch2_bkey_to_text(&PBUF(buf2), &uk);
|
||||
bch2_bkey_to_text(&buf2, &uk);
|
||||
} else {
|
||||
pr_buf(&buf2, "(none)");
|
||||
}
|
||||
|
||||
if (k) {
|
||||
struct bkey uk = bkey_unpack_key(l->b, k);
|
||||
bch2_bkey_to_text(&PBUF(buf3), &uk);
|
||||
bch2_bkey_to_text(&buf3, &uk);
|
||||
} else {
|
||||
pr_buf(&buf3, "(none)");
|
||||
}
|
||||
|
||||
panic("path should be %s key at level %u:\n"
|
||||
"path pos %s\n"
|
||||
"prev key %s\n"
|
||||
"cur key %s\n",
|
||||
msg, level, buf1, buf2, buf3);
|
||||
msg, level, buf1.buf, buf2.buf, buf3.buf);
|
||||
}
|
||||
|
||||
static void bch2_btree_path_verify(struct btree_trans *trans,
|
||||
@ -739,16 +742,16 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
|
||||
if (!bkey_cmp(prev.k->p, k.k->p) &&
|
||||
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
|
||||
prev.k->p.snapshot) > 0) {
|
||||
char buf1[100], buf2[200];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
|
||||
bch2_bkey_to_text(&PBUF(buf1), k.k);
|
||||
bch2_bkey_to_text(&PBUF(buf2), prev.k);
|
||||
bch2_bkey_to_text(&buf1, k.k);
|
||||
bch2_bkey_to_text(&buf2, prev.k);
|
||||
|
||||
panic("iter snap %u\n"
|
||||
"k %s\n"
|
||||
"prev %s\n",
|
||||
iter->snapshot,
|
||||
buf1, buf2);
|
||||
buf1.buf, buf2.buf);
|
||||
}
|
||||
out:
|
||||
bch2_trans_iter_exit(trans, ©);
|
||||
@ -760,7 +763,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
|
||||
{
|
||||
struct btree_path *path;
|
||||
unsigned idx;
|
||||
char buf[100];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
trans_for_each_path_inorder(trans, path, idx) {
|
||||
int cmp = cmp_int(path->btree_id, id) ?:
|
||||
@ -786,9 +789,10 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
|
||||
}
|
||||
|
||||
bch2_dump_trans_paths_updates(trans);
|
||||
bch2_bpos_to_text(&buf, pos);
|
||||
|
||||
panic("not locked: %s %s%s\n",
|
||||
bch2_btree_ids[id],
|
||||
(bch2_bpos_to_text(&PBUF(buf), pos), buf),
|
||||
bch2_btree_ids[id], buf.buf,
|
||||
key_cache ? " cached" : "");
|
||||
}
|
||||
|
||||
@ -1071,23 +1075,23 @@ static void btree_path_verify_new_node(struct btree_trans *trans,
|
||||
if (!k ||
|
||||
bkey_deleted(k) ||
|
||||
bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
|
||||
char buf1[100];
|
||||
char buf2[100];
|
||||
char buf3[100];
|
||||
char buf4[100];
|
||||
struct printbuf buf1 = PRINTBUF;
|
||||
struct printbuf buf2 = PRINTBUF;
|
||||
struct printbuf buf3 = PRINTBUF;
|
||||
struct printbuf buf4 = PRINTBUF;
|
||||
struct bkey uk = bkey_unpack_key(b, k);
|
||||
|
||||
bch2_dump_btree_node(c, l->b);
|
||||
bch2_bpos_to_text(&PBUF(buf1), path->pos);
|
||||
bch2_bkey_to_text(&PBUF(buf2), &uk);
|
||||
bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
|
||||
bch2_bpos_to_text(&PBUF(buf3), b->data->max_key);
|
||||
bch2_bpos_to_text(&buf1, path->pos);
|
||||
bch2_bkey_to_text(&buf2, &uk);
|
||||
bch2_bpos_to_text(&buf3, b->data->min_key);
|
||||
bch2_bpos_to_text(&buf3, b->data->max_key);
|
||||
panic("parent iter doesn't point to new node:\n"
|
||||
"iter pos %s %s\n"
|
||||
"iter key %s\n"
|
||||
"new node %s-%s\n",
|
||||
bch2_btree_ids[path->btree_id], buf1,
|
||||
buf2, buf3, buf4);
|
||||
bch2_btree_ids[path->btree_id],
|
||||
buf1.buf, buf2.buf, buf3.buf, buf4.buf);
|
||||
}
|
||||
|
||||
if (!parent_locked)
|
||||
@ -1783,18 +1787,22 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
|
||||
{
|
||||
struct btree_path *path;
|
||||
struct btree_insert_entry *i;
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
unsigned idx;
|
||||
char buf1[300], buf2[300];
|
||||
|
||||
btree_trans_sort_paths(trans);
|
||||
|
||||
trans_for_each_path_inorder(trans, path, idx)
|
||||
trans_for_each_path_inorder(trans, path, idx) {
|
||||
printbuf_reset(&buf1);
|
||||
|
||||
bch2_bpos_to_text(&buf1, path->pos);
|
||||
|
||||
printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
|
||||
path->idx, path->ref, path->intent_ref,
|
||||
path->should_be_locked ? " S" : "",
|
||||
path->preserve ? " P" : "",
|
||||
bch2_btree_ids[path->btree_id],
|
||||
(bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
|
||||
buf1.buf,
|
||||
path->nodes_locked,
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
(void *) path->ip_allocated
|
||||
@ -1802,17 +1810,25 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
|
||||
NULL
|
||||
#endif
|
||||
);
|
||||
}
|
||||
|
||||
trans_for_each_update(trans, i) {
|
||||
struct bkey u;
|
||||
struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
|
||||
|
||||
printbuf_reset(&buf1);
|
||||
printbuf_reset(&buf2);
|
||||
bch2_bkey_val_to_text(&buf1, trans->c, old);
|
||||
bch2_bkey_val_to_text(&buf2, trans->c, bkey_i_to_s_c(i->k));
|
||||
|
||||
printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
|
||||
bch2_btree_ids[i->btree_id],
|
||||
(void *) i->ip_allocated,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
|
||||
(bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
|
||||
buf1.buf, buf2.buf);
|
||||
}
|
||||
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
}
|
||||
|
||||
static struct btree_path *btree_path_alloc(struct btree_trans *trans,
|
||||
|
@ -41,7 +41,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
|
||||
struct bkey_s_c k;
|
||||
struct bkey_s_c_btree_ptr_v2 bp;
|
||||
struct bkey unpacked;
|
||||
char buf1[100], buf2[100];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
|
||||
BUG_ON(!b->c.level);
|
||||
|
||||
@ -58,9 +58,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
|
||||
|
||||
if (bpos_cmp(next_node, bp.v->min_key)) {
|
||||
bch2_dump_btree_node(c, b);
|
||||
panic("expected next min_key %s got %s\n",
|
||||
(bch2_bpos_to_text(&PBUF(buf1), next_node), buf1),
|
||||
(bch2_bpos_to_text(&PBUF(buf2), bp.v->min_key), buf2));
|
||||
bch2_bpos_to_text(&buf1, next_node);
|
||||
bch2_bpos_to_text(&buf2, bp.v->min_key);
|
||||
panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
|
||||
}
|
||||
|
||||
bch2_btree_node_iter_advance(&iter, b);
|
||||
@ -68,9 +68,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
|
||||
if (bch2_btree_node_iter_end(&iter)) {
|
||||
if (bpos_cmp(k.k->p, b->key.k.p)) {
|
||||
bch2_dump_btree_node(c, b);
|
||||
panic("expected end %s got %s\n",
|
||||
(bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1),
|
||||
(bch2_bpos_to_text(&PBUF(buf2), k.k->p), buf2));
|
||||
bch2_bpos_to_text(&buf1, b->key.k.p);
|
||||
bch2_bpos_to_text(&buf2, k.k->p);
|
||||
panic("expected end %s got %s\n", buf1.buf, buf2.buf);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1151,10 +1151,11 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
|
||||
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?:
|
||||
bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert));
|
||||
if (invalid) {
|
||||
char buf[160];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert));
|
||||
bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf, invalid);
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
|
||||
bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf.buf, invalid);
|
||||
printbuf_exit(&buf);
|
||||
dump_stack();
|
||||
}
|
||||
|
||||
@ -1636,15 +1637,17 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
|
||||
}
|
||||
|
||||
if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
|
||||
char buf1[100], buf2[100];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
|
||||
bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key);
|
||||
bch2_bpos_to_text(&PBUF(buf2), next->data->min_key);
|
||||
bch2_bpos_to_text(&buf1, prev->data->max_key);
|
||||
bch2_bpos_to_text(&buf2, next->data->min_key);
|
||||
bch_err(c,
|
||||
"btree topology error in btree merge:\n"
|
||||
" prev ends at %s\n"
|
||||
" next starts at %s",
|
||||
buf1, buf2);
|
||||
buf1.buf, buf2.buf);
|
||||
printbuf_exit(&buf1);
|
||||
printbuf_exit(&buf2);
|
||||
bch2_topology_error(c);
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
|
@@ -831,11 +831,12 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 		const char *invalid = bch2_bkey_invalid(c,
 				bkey_i_to_s_c(i->k), i->bkey_type);
 		if (invalid) {
-			char buf[200];
+			struct printbuf buf = PRINTBUF;
 
-			bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
+			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
 			bch2_fs_fatal_error(c, "invalid bkey %s on insert from %s -> %ps: %s\n",
-					    buf, trans->fn, (void *) i->ip_allocated, invalid);
+					    buf.buf, trans->fn, (void *) i->ip_allocated, invalid);
+			printbuf_exit(&buf);
 			return -EINVAL;
 		}
 		btree_insert_entry_checks(trans, i);
@ -376,22 +376,23 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
|
||||
{
|
||||
struct bch_fs_usage __percpu *fs_usage;
|
||||
int idx, ret = 0;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
percpu_down_read(&c->mark_lock);
|
||||
buf.atomic++;
|
||||
|
||||
idx = bch2_replicas_entry_idx(c, r);
|
||||
if (idx < 0 &&
|
||||
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
|
||||
fsck_err(c, "no replicas entry\n"
|
||||
" while marking %s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) {
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
|
||||
percpu_up_read(&c->mark_lock);
|
||||
ret = bch2_mark_replicas(c, r);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
percpu_down_read(&c->mark_lock);
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
idx = bch2_replicas_entry_idx(c, r);
|
||||
}
|
||||
if (idx < 0) {
|
||||
@ -407,6 +408,7 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
|
||||
err:
|
||||
fsck_err:
|
||||
percpu_up_read(&c->mark_lock);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -678,7 +680,8 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
u16 bucket_sectors = !ptr->cached
|
||||
? dirty_sectors
|
||||
: cached_sectors;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (gen_after(ptr->gen, b_gen)) {
|
||||
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
||||
@ -687,8 +690,9 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
ptr->dev, bucket_nr, b_gen,
|
||||
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
||||
ptr->gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
return -EIO;
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
|
||||
@ -698,8 +702,10 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
ptr->dev, bucket_nr, b_gen,
|
||||
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
||||
ptr->gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
return -EIO;
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (b_gen != ptr->gen && !ptr->cached) {
|
||||
@ -710,12 +716,16 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
*bucket_gen(ca, bucket_nr),
|
||||
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
||||
ptr->gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
return -EIO;
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (b_gen != ptr->gen)
|
||||
return 1;
|
||||
if (b_gen != ptr->gen) {
|
||||
ret = 1;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (bucket_data_type && ptr_data_type &&
|
||||
bucket_data_type != ptr_data_type) {
|
||||
@ -725,8 +735,10 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
ptr->dev, bucket_nr, b_gen,
|
||||
bch2_data_types[bucket_data_type],
|
||||
bch2_data_types[ptr_data_type],
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
return -EIO;
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
|
||||
@ -736,11 +748,14 @@ static int check_bucket_ref(struct bch_fs *c,
|
||||
ptr->dev, bucket_nr, b_gen,
|
||||
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
||||
bucket_sectors, sectors,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
return -EIO;
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
@ -759,7 +774,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
|
||||
struct bucket *g;
|
||||
struct bucket_mark new, old;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(!(flags & BTREE_TRIGGER_GC));
|
||||
@ -767,6 +782,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
/* * XXX doesn't handle deletion */
|
||||
|
||||
percpu_down_read(&c->mark_lock);
|
||||
buf.atomic++;
|
||||
g = PTR_GC_BUCKET(ca, ptr);
|
||||
|
||||
if (g->mark.dirty_sectors ||
|
||||
@ -774,7 +790,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
bch2_fs_inconsistent(c,
|
||||
"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
|
||||
ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
@ -799,8 +815,8 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
|
||||
err:
|
||||
percpu_up_read(&c->mark_lock);
|
||||
|
||||
return 0;
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __mark_pointer(struct btree_trans *trans,
|
||||
@ -987,10 +1003,11 @@ static int bch2_mark_extent(struct btree_trans *trans,
|
||||
if (r.e.nr_devs) {
|
||||
ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
|
||||
if (ret) {
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, k);
|
||||
bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
|
||||
bch2_bkey_val_to_text(&buf, c, k);
|
||||
bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -1019,13 +1036,16 @@ static int bch2_mark_stripe(struct btree_trans *trans,
struct stripe *m = genradix_ptr(&c->stripes, idx);

if (!m || (old_s && !m->alive)) {
char buf1[200], buf2[200];
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;

bch2_bkey_val_to_text(&PBUF(buf1), c, old);
bch2_bkey_val_to_text(&PBUF(buf2), c, new);
bch2_bkey_val_to_text(&buf1, c, old);
bch2_bkey_val_to_text(&buf2, c, new);
bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
"old %s\n"
"new %s", idx, buf1, buf2);
"new %s", idx, buf1.buf, buf2.buf);
printbuf_exit(&buf2);
printbuf_exit(&buf1);
bch2_inconsistent_error(c);
return -1;
}
@ -1090,10 +1110,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
((s64) m->sectors * m->nr_redundant),
journal_seq, gc);
if (ret) {
char buf[200];
struct printbuf buf = PRINTBUF;

bch2_bkey_val_to_text(&PBUF(buf), c, new);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
bch2_bkey_val_to_text(&buf, c, new);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
printbuf_exit(&buf);
return ret;
}
}
@ -1174,7 +1195,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
u64 next_idx = end;
s64 ret = 0;
char buf[200];
struct printbuf buf = PRINTBUF;

if (r_idx >= c->reflink_gc_nr)
goto not_found;
@ -1193,7 +1214,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
if (fsck_err(c, "pointer to missing indirect extent\n"
" %s\n"
" missing range %llu-%llu",
(bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c), buf),
(bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
*idx, next_idx)) {
struct bkey_i_error new;

@ -1207,6 +1228,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,

*idx = next_idx;
fsck_err:
printbuf_exit(&buf);
return ret;
}

@ -1289,7 +1311,7 @@ void fs_usage_apply_warn(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
char buf[200];
struct printbuf buf = PRINTBUF;

bch_err(c, "disk usage increased %lli more than %u sectors reserved",
should_not_have_added, disk_res_sectors);
@ -1298,13 +1320,17 @@ void fs_usage_apply_warn(struct btree_trans *trans,
struct bkey_s_c old = { &i->old_k, i->old_v };

pr_err("while inserting");
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
pr_err(" %s", buf);
printbuf_reset(&buf);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
pr_err(" %s", buf.buf);
pr_err("overlapping with");
bch2_bkey_val_to_text(&PBUF(buf), c, old);
pr_err(" %s", buf);
printbuf_reset(&buf);
bch2_bkey_val_to_text(&buf, c, old);
pr_err(" %s", buf.buf);
}

__WARN();
printbuf_exit(&buf);
}

int bch2_trans_fs_usage_apply(struct btree_trans *trans,
@ -1744,7 +1770,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
struct bkey_i *n;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
char buf[200];
struct printbuf buf = PRINTBUF;
int ret;

bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
@ -1764,19 +1790,19 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,

refcount = bkey_refcount(n);
if (!refcount) {
bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
"nonexistent indirect extent at %llu while marking\n %s",
*idx, buf);
*idx, buf.buf);
ret = -EIO;
goto err;
}

if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
"indirect extent refcount underflow at %llu while marking\n %s",
*idx, buf);
*idx, buf.buf);
ret = -EIO;
goto err;
}
@ -1811,6 +1837,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
*idx = k.k->p.offset;
err:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
return ret;
}

@ -157,6 +157,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
unsigned long now;
unsigned i;

out->atomic++;
spin_lock(&clock->timer_lock);
now = atomic64_read(&clock->now);

@ -165,6 +166,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
clock->timers.data[i]->fn,
clock->timers.data[i]->expire - now);
spin_unlock(&clock->timer_lock);
--out->atomic;
}

void bch2_io_clock_exit(struct io_clock *clock)

@ -169,10 +169,11 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
failed |= bch2_btree_verify_replica(c, b, p);

if (failed) {
char buf[200];
struct printbuf buf = PRINTBUF;

bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key));
bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf.buf);
printbuf_exit(&buf);
}
out:
mutex_unlock(&c->verify_lock);
@ -188,8 +189,7 @@ struct dump_iter {
struct bch_fs *c;
enum btree_id id;

char buf[1 << 12];
size_t bytes; /* what's currently in buf */
struct printbuf buf;

char __user *ubuf; /* destination user buffer */
size_t size; /* size of requested read */
@ -198,9 +198,9 @@ struct dump_iter {

static int flush_buf(struct dump_iter *i)
{
if (i->bytes) {
size_t bytes = min(i->bytes, i->size);
int err = copy_to_user(i->ubuf, i->buf, bytes);
if (i->buf.pos) {
size_t bytes = min_t(size_t, i->buf.pos, i->size);
int err = copy_to_user(i->ubuf, i->buf.buf, bytes);

if (err)
return err;
@ -208,8 +208,8 @@ static int flush_buf(struct dump_iter *i)
i->ret += bytes;
i->ubuf += bytes;
i->size -= bytes;
i->bytes -= bytes;
memmove(i->buf, i->buf + bytes, i->bytes);
i->buf.pos -= bytes;
memmove(i->buf.buf, i->buf.buf + bytes, i->buf.pos);
}

return 0;
@ -228,13 +228,17 @@ static int bch2_dump_open(struct inode *inode, struct file *file)
i->from = POS_MIN;
i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
i->id = bd->id;
i->buf = PRINTBUF;

return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
struct dump_iter *i = file->private_data;

printbuf_exit(&i->buf);
kfree(i);
return 0;
}

@ -266,11 +270,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
k = bch2_btree_iter_peek(&iter);

while (k.k && !(err = bkey_err(k))) {
bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
i->bytes = strlen(i->buf);
BUG_ON(i->bytes >= sizeof(i->buf));
i->buf[i->bytes] = '\n';
i->bytes++;
bch2_bkey_val_to_text(&i->buf, i->c, k);
pr_char(&i->buf, '\n');

k = bch2_btree_iter_next(&iter);
i->from = iter.pos;
@ -319,8 +320,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
bch2_trans_init(&trans, i->c, 0, 0);

for_each_btree_node(&trans, iter, i->id, i->from, 0, b, err) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, b);
i->bytes = strlen(i->buf);
bch2_btree_node_to_text(&i->buf, i->c, b);
err = flush_buf(i);
if (err)
break;
@ -384,16 +384,14 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
bch2_btree_node_iter_peek(&l->iter, l->b);

if (l->b != prev_node) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, l->b);
i->bytes = strlen(i->buf);
bch2_btree_node_to_text(&i->buf, i->c, l->b);
err = flush_buf(i);
if (err)
break;
}
prev_node = l->b;

bch2_bfloat_to_text(&PBUF(i->buf), l->b, _k);
i->bytes = strlen(i->buf);
bch2_bfloat_to_text(&i->buf, l->b, _k);
err = flush_buf(i);
if (err)
break;

@ -286,14 +286,15 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
struct bch_csum got = ec_block_checksum(buf, i, offset);

if (bch2_crc_cmp(want, got)) {
char buf2[200];
struct printbuf buf2 = PRINTBUF;

bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&buf->key.k_i));
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));

bch_err_ratelimited(c,
"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
(void *) _RET_IP_, i, j, v->csum_type,
want.lo, got.lo, buf2);
want.lo, got.lo, buf2.buf);
printbuf_exit(&buf2);
clear_bit(i, buf->valid);
break;
}

@ -1676,7 +1676,8 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
struct bch_fs *c = root->d_sb->s_fs_info;
enum bch_opt_id i;
char buf[512];
struct printbuf buf = PRINTBUF;
int ret = 0;

for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i];
@ -1688,13 +1689,17 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
continue;

bch2_opt_to_text(&PBUF(buf), c, opt, v,
printbuf_reset(&buf);
bch2_opt_to_text(&buf, c, opt, v,
OPT_SHOW_MOUNT_STYLE);
seq_putc(seq, ',');
seq_puts(seq, buf);
seq_puts(seq, buf.buf);
}

return 0;
if (buf.allocation_failure)
ret = -ENOMEM;
printbuf_exit(&buf);
return ret;
}

static void bch2_put_super(struct super_block *sb)

@ -698,15 +698,16 @@ static int check_key_has_snapshot(struct btree_trans *trans,
|
||||
struct bkey_s_c k)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (mustfix_fsck_err_on(!snapshot_t(c, k.k->p.snapshot)->equiv, c,
|
||||
"key in missing snapshot: %s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
|
||||
return bch2_btree_delete_at(trans, iter,
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -746,7 +747,7 @@ static int hash_check_key(struct btree_trans *trans,
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_iter iter = { NULL };
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
struct bkey_s_c k;
|
||||
u64 hash;
|
||||
int ret = 0;
|
||||
@ -770,8 +771,9 @@ static int hash_check_key(struct btree_trans *trans,
|
||||
if (fsck_err_on(k.k->type == desc.key_type &&
|
||||
!desc.cmp_bkey(k, hash_k), c,
|
||||
"duplicate hash table keys:\n%s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c,
|
||||
hash_k), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, hash_k),
|
||||
buf.buf))) {
|
||||
ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
|
||||
break;
|
||||
}
|
||||
@ -782,13 +784,16 @@ static int hash_check_key(struct btree_trans *trans,
|
||||
}
|
||||
|
||||
}
|
||||
out:
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
bad_hash:
|
||||
if (fsck_err(c, "hash table key at wrong offset: btree %u inode %llu offset %llu, "
|
||||
"hashed to %llu\n%s",
|
||||
desc.btree_id, hash_k.k->p.inode, hash_k.k->p.offset, hash,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, hash_k), buf)) == FSCK_ERR_IGNORE)
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf)) == FSCK_ERR_IGNORE)
|
||||
return 0;
|
||||
|
||||
ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
|
||||
@ -796,9 +801,9 @@ static int hash_check_key(struct btree_trans *trans,
|
||||
bch_err(c, "hash_redo_key err %i", ret);
|
||||
return ret;
|
||||
}
|
||||
return -EINTR;
|
||||
ret = -EINTR;
|
||||
fsck_err:
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int check_inode(struct btree_trans *trans,
|
||||
@ -1166,32 +1171,34 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bkey_s_c k;
|
||||
struct inode_walker_entry *i;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
k = bch2_btree_iter_peek(iter);
|
||||
if (!k.k)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
ret = check_key_has_snapshot(trans, iter, k);
|
||||
if (ret)
|
||||
return ret < 0 ? ret : 0;
|
||||
if (ret) {
|
||||
ret = ret < 0 ? ret : 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = snapshots_seen_update(c, s, k.k->p);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (k.k->type == KEY_TYPE_whiteout)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
if (inode->cur_inum != k.k->p.inode) {
|
||||
ret = check_i_sectors(trans, inode);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
#if 0
|
||||
if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
|
||||
@ -1201,22 +1208,29 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev.k));
|
||||
bch2_bkey_val_to_text(&PBUF(buf2), c, k);
|
||||
|
||||
if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2))
|
||||
return fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
|
||||
if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2)) {
|
||||
ret = fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
ret = __walk_inode(trans, inode, k.k->p);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(ret == INT_MAX, c,
|
||||
"extent in missing inode:\n %s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
|
||||
return bch2_btree_delete_at(trans, iter,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret == INT_MAX)
|
||||
return 0;
|
||||
if (ret == INT_MAX) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
i = inode->d + ret;
|
||||
ret = 0;
|
||||
@ -1225,9 +1239,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
!S_ISLNK(i->inode.bi_mode), c,
|
||||
"extent in non regular inode mode %o:\n %s",
|
||||
i->inode.bi_mode,
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
|
||||
return bch2_btree_delete_at(trans, iter,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!bch2_snapshot_internal_node(c, k.k->p.snapshot)) {
|
||||
for_each_visible_inode(c, s, inode, k.k->p.snapshot, i) {
|
||||
@ -1237,11 +1254,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
"extent type %u offset %llu past end of inode %llu, i_size %llu",
|
||||
k.k->type, k.k->p.offset, k.k->p.inode, i->inode.bi_size)) {
|
||||
bch2_fs_lazy_rw(c);
|
||||
return bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
|
||||
ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
|
||||
SPOS(k.k->p.inode, round_up(i->inode.bi_size, block_bytes(c)) >> 9,
|
||||
k.k->p.snapshot),
|
||||
POS(k.k->p.inode, U64_MAX),
|
||||
0, NULL) ?: -EINTR;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1253,7 +1271,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
bch2_bkey_buf_reassemble(&prev, c, k);
|
||||
#endif
|
||||
|
||||
out:
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1351,7 +1372,7 @@ static int check_dirent_target(struct btree_trans *trans,
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bkey_i_dirent *n;
|
||||
bool backpointer_exists = true;
|
||||
char buf[200];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (!target->bi_dir &&
|
||||
@ -1377,9 +1398,7 @@ static int check_dirent_target(struct btree_trans *trans,
|
||||
"directory %llu with multiple links",
|
||||
target->bi_inum)) {
|
||||
ret = __remove_dirent(trans, d.k->p);
|
||||
if (ret)
|
||||
goto err;
|
||||
return 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (fsck_err_on(backpointer_exists &&
|
||||
@ -1416,18 +1435,19 @@ static int check_dirent_target(struct btree_trans *trans,
|
||||
"incorrect d_type: got %s, should be %s:\n%s",
|
||||
bch2_d_type_str(d.v->d_type),
|
||||
bch2_d_type_str(inode_d_type(target)),
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, d.s_c), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&n->k_i, d.s_c);
|
||||
n->v.d_type = inode_d_type(target);
|
||||
|
||||
ret = bch2_trans_update(trans, iter, &n->k_i, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
d = dirent_i_to_s_c(n);
|
||||
}
|
||||
@ -1441,19 +1461,21 @@ static int check_dirent_target(struct btree_trans *trans,
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&n->k_i, d.s_c);
|
||||
n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
|
||||
|
||||
ret = bch2_trans_update(trans, iter, &n->k_i, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
d = dirent_i_to_s_c(n);
|
||||
}
|
||||
out:
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1467,46 +1489,53 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
struct bkey_s_c k;
|
||||
struct bkey_s_c_dirent d;
|
||||
struct inode_walker_entry *i;
|
||||
char buf[200];
|
||||
int ret;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
k = bch2_btree_iter_peek(iter);
|
||||
if (!k.k)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
ret = check_key_has_snapshot(trans, iter, k);
|
||||
if (ret)
|
||||
return ret < 0 ? ret : 0;
|
||||
if (ret) {
|
||||
ret = ret < 0 ? ret : 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = snapshots_seen_update(c, s, k.k->p);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (k.k->type == KEY_TYPE_whiteout)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
if (dir->cur_inum != k.k->p.inode) {
|
||||
ret = check_subdir_count(trans, dir);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = __walk_inode(trans, dir, k.k->p);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(ret == INT_MAX, c,
|
||||
"dirent in nonexisting directory:\n%s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
|
||||
return bch2_btree_delete_at(trans, iter,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret == INT_MAX)
|
||||
return 0;
|
||||
if (ret == INT_MAX) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
i = dir->d + ret;
|
||||
ret = 0;
|
||||
@ -1514,8 +1543,11 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
|
||||
"dirent in non directory inode type %s:\n%s",
|
||||
bch2_d_type_str(inode_d_type(&i->inode)),
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
|
||||
return bch2_btree_delete_at(trans, iter, 0);
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter, 0);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dir->first_this_inode)
|
||||
*hash_info = bch2_hash_info_init(c, &dir->d[0].inode);
|
||||
@ -1523,12 +1555,15 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
ret = hash_check_key(trans, bch2_dirent_hash_desc,
|
||||
hash_info, iter, k);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret) /* dirent has been deleted */
|
||||
return 0;
|
||||
goto err;
|
||||
if (ret) {
|
||||
/* dirent has been deleted */
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (k.k->type != KEY_TYPE_dirent)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
d = bkey_s_c_to_dirent(k);
|
||||
|
||||
@ -1541,24 +1576,27 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
ret = __subvol_lookup(trans, target_subvol,
|
||||
&target_snapshot, &target_inum);
|
||||
if (ret && ret != -ENOENT)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(ret, c,
|
||||
"dirent points to missing subvolume %llu",
|
||||
le64_to_cpu(d.v->d_child_subvol)))
|
||||
return __remove_dirent(trans, d.k->p);
|
||||
le64_to_cpu(d.v->d_child_subvol))) {
|
||||
ret = __remove_dirent(trans, d.k->p);
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = __lookup_inode(trans, target_inum,
|
||||
&subvol_root, &target_snapshot);
|
||||
if (ret && ret != -ENOENT)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(ret, c,
|
||||
"subvolume %u points to missing subvolume root %llu",
|
||||
target_subvol,
|
||||
target_inum)) {
|
||||
bch_err(c, "repair not implemented yet");
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
|
||||
@ -1568,32 +1606,33 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
subvol_root.bi_subvol = target_subvol;
|
||||
ret = __write_inode(trans, &subvol_root, target_snapshot);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = check_dirent_target(trans, iter, d, &subvol_root,
|
||||
target_snapshot);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
} else {
|
||||
ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(!target->nr, c,
|
||||
"dirent points to missing inode:\n%s",
|
||||
(bch2_bkey_val_to_text(&PBUF(buf), c,
|
||||
k), buf))) {
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k),
|
||||
buf.buf))) {
|
||||
ret = __remove_dirent(trans, d.k->p);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (i = target->d; i < target->d + target->nr; i++) {
|
||||
ret = check_dirent_target(trans, iter, d,
|
||||
&i->inode, i->snapshot);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1601,7 +1640,10 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
|
||||
i->count++;
|
||||
|
||||
out:
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2057,11 +2057,11 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
struct btree_iter iter;
char buf[200];
struct printbuf buf = PRINTBUF;
int ret;

bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf);
bch2_bkey_val_to_text(&buf, c, k);
bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf.buf);

bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)),
@ -2069,12 +2069,14 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,

ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (ret)
return;
goto out;

bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch_err(c, "%s", buf);
bch2_bkey_val_to_text(&buf, c, k);
bch_err(c, "%s", buf.buf);
bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
bch2_trans_iter_exit(trans, &iter);
out:
printbuf_exit(&buf);
}

int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,

@ -414,18 +414,18 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
!can_discard &&
j->reservations.idx == j->reservations.unwritten_idx &&
(flags & JOURNAL_RES_GET_RESERVED)) {
char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
struct printbuf buf = PRINTBUF;

bch_err(c, "Journal stuck! Hava a pre-reservation but journal full");
if (journal_debug_buf) {
bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
bch_err(c, "%s", journal_debug_buf);

bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j);
bch_err(c, "Journal pins:\n%s", journal_debug_buf);
kfree(journal_debug_buf);
}
bch2_journal_debug_to_text(&buf, j);
bch_err(c, "%s", buf.buf);

printbuf_reset(&buf);
bch2_journal_pins_to_text(&buf, j);
bch_err(c, "Journal pins:\n%s", buf.buf);

printbuf_exit(&buf);
bch2_fatal_error(c);
dump_stack();
}
@ -1186,6 +1186,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
unsigned long now = jiffies;
unsigned i;

out->atomic++;

rcu_read_lock();
s = READ_ONCE(j->reservations);

@ -1270,6 +1272,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
}

rcu_read_unlock();

--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
@ -1286,6 +1290,8 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
u64 i;

spin_lock(&j->lock);
out->atomic++;

fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
pr_buf(out, "%llu: count %u\n",
i, atomic_read(&pin_list->count));
@ -1305,5 +1311,7 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
pr_buf(out, "\t%px %ps\n",
pin, pin->flush);
}

--out->atomic;
spin_unlock(&j->lock);
}

@ -251,14 +251,15 @@ static int journal_validate_key(struct bch_fs *c, const char *where,
|
||||
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
|
||||
__btree_node_type(level, btree_id));
|
||||
if (invalid) {
|
||||
char buf[160];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
|
||||
mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
|
||||
type, where,
|
||||
(u64 *) k - entry->_data,
|
||||
le16_to_cpu(entry->u64s),
|
||||
invalid, buf);
|
||||
invalid, buf.buf);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
|
||||
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
|
||||
@ -995,6 +996,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
struct journal_replay *i, *t;
|
||||
struct bch_dev *ca;
|
||||
unsigned iter;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
size_t keys = 0, entries = 0;
|
||||
bool degraded = false;
|
||||
u64 seq, last_seq = 0;
|
||||
@ -1053,7 +1055,8 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
|
||||
if (!last_seq) {
|
||||
fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
|
||||
return -1;
|
||||
ret = -1;
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* Drop blacklisted entries and entries older than last_seq: */
|
||||
@ -1085,7 +1088,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
|
||||
while (seq < le64_to_cpu(i->j.seq)) {
|
||||
u64 missing_start, missing_end;
|
||||
char buf1[200], buf2[200];
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
|
||||
while (seq < le64_to_cpu(i->j.seq) &&
|
||||
bch2_journal_seq_is_blacklisted(c, seq, false))
|
||||
@ -1101,14 +1104,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
seq++;
|
||||
|
||||
if (i->list.prev != list) {
|
||||
struct printbuf out = PBUF(buf1);
|
||||
struct journal_replay *p = list_prev_entry(i, list);
|
||||
|
||||
bch2_journal_ptrs_to_text(&out, c, p);
|
||||
pr_buf(&out, " size %zu", vstruct_sectors(&p->j, c->block_bits));
|
||||
bch2_journal_ptrs_to_text(&buf1, c, p);
|
||||
pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
|
||||
} else
|
||||
sprintf(buf1, "(none)");
|
||||
bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);
|
||||
pr_buf(&buf1, "(none)");
|
||||
bch2_journal_ptrs_to_text(&buf2, c, i);
|
||||
|
||||
missing_end = seq - 1;
|
||||
fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
|
||||
@ -1116,7 +1118,10 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
" next at %s",
|
||||
missing_start, missing_end,
|
||||
last_seq, *blacklist_seq - 1,
|
||||
buf1, buf2);
|
||||
buf1.buf, buf2.buf);
|
||||
|
||||
printbuf_exit(&buf1);
|
||||
printbuf_exit(&buf2);
|
||||
}
|
||||
|
||||
seq++;
|
||||
@ -1130,14 +1135,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
.e.nr_required = 1,
|
||||
};
|
||||
unsigned ptr;
|
||||
char buf[80];
|
||||
|
||||
if (i->ignore)
|
||||
continue;
|
||||
|
||||
ret = jset_validate_entries(c, &i->j, READ);
|
||||
if (ret)
|
||||
goto fsck_err;
|
||||
goto err;
|
||||
|
||||
for (ptr = 0; ptr < i->nr_ptrs; ptr++)
|
||||
replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;
|
||||
@ -1149,15 +1153,17 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
* the devices - this is wrong:
|
||||
*/
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_replicas_entry_to_text(&buf, &replicas.e);
|
||||
|
||||
if (!degraded &&
|
||||
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
|
||||
fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
|
||||
"superblock not marked as containing replicas %s",
|
||||
(bch2_replicas_entry_to_text(&PBUF(buf),
|
||||
&replicas.e), buf)))) {
|
||||
buf.buf))) {
|
||||
ret = bch2_mark_replicas(c, &replicas.e);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err;
|
||||
}
|
||||
|
||||
for_each_jset_key(k, _n, entry, &i->j)
|
||||
@ -1171,7 +1177,9 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
|
||||
if (*start_seq != *blacklist_seq)
|
||||
bch_info(c, "dropped unflushed entries %llu-%llu",
|
||||
*blacklist_seq, *start_seq - 1);
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1481,7 +1489,7 @@ void bch2_journal_write(struct closure *cl)
|
||||
struct jset_entry *start, *end;
|
||||
struct jset *jset;
|
||||
struct bio *bio;
|
||||
char *journal_debug_buf = NULL;
|
||||
struct printbuf journal_debug_buf = PRINTBUF;
|
||||
bool validate_before_checksum = false;
|
||||
unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
|
||||
int ret;
|
||||
@ -1586,11 +1594,8 @@ void bch2_journal_write(struct closure *cl)
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
|
||||
if (journal_debug_buf)
|
||||
__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
|
||||
}
|
||||
if (ret)
|
||||
__bch2_journal_debug_to_text(&journal_debug_buf, j);
|
||||
|
||||
/*
|
||||
* write is allocated, no longer need to account for it in
|
||||
@ -1607,8 +1612,8 @@ void bch2_journal_write(struct closure *cl)
|
||||
|
||||
if (ret) {
|
||||
bch_err(c, "Unable to allocate journal write:\n%s",
|
||||
journal_debug_buf);
|
||||
kfree(journal_debug_buf);
|
||||
journal_debug_buf.buf);
|
||||
printbuf_exit(&journal_debug_buf);
|
||||
bch2_fatal_error(c);
|
||||
continue_at(cl, journal_write_done, c->io_complete_wq);
|
||||
return;
|
||||
|
@ -216,14 +216,11 @@ void bch2_journal_space_available(struct journal *j)
if (!clean_ondisk &&
j->reservations.idx ==
j->reservations.unwritten_idx) {
char *buf = kmalloc(4096, GFP_ATOMIC);
struct printbuf buf = PRINTBUF;

bch_err(c, "journal stuck");
if (buf) {
__bch2_journal_debug_to_text(&_PBUF(buf, 4096), j);
pr_err("\n%s", buf);
kfree(buf);
}
__bch2_journal_debug_to_text(&buf, j);
bch_err(c, "journal stuck\n%s", buf.buf);
printbuf_exit(&buf);

bch2_fatal_error(c);
ret = cur_entry_journal_stuck;

@ -257,35 +257,47 @@ void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
struct bch_fs_rebalance *r = &c->rebalance;
struct rebalance_work w = rebalance_work(c);
char h1[21], h2[21];

bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
w.dev_most_full_idx, h1, h2);
out->tabstops[0] = 20;

bch2_hprint(&PBUF(h1), w.total_work << 9);
bch2_hprint(&PBUF(h2), c->capacity << 9);
pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);
pr_buf(out, "fullest_dev (%i):", w.dev_most_full_idx);
pr_tab(out);

pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate);
bch2_hprint(out, w.dev_most_full_work << 9);
pr_buf(out, "/");
bch2_hprint(out, w.dev_most_full_capacity << 9);
pr_newline(out);

pr_buf(out, "total work:");
pr_tab(out);

bch2_hprint(out, w.total_work << 9);
pr_buf(out, "/");
bch2_hprint(out, c->capacity << 9);
pr_newline(out);

pr_buf(out, "rate:");
pr_tab(out);
pr_buf(out, "%u", r->pd.rate.rate);
pr_newline(out);

switch (r->state) {
case REBALANCE_WAITING:
pr_buf(out, "waiting\n");
pr_buf(out, "waiting");
break;
case REBALANCE_THROTTLED:
bch2_hprint(&PBUF(h1),
pr_buf(out, "throttled for %lu sec or ",
(r->throttled_until_cputime - jiffies) / HZ);
bch2_hprint(out,
(r->throttled_until_iotime -
atomic64_read(&c->io_clock[WRITE].now)) << 9);
pr_buf(out, "throttled for %lu sec or %s io\n",
(r->throttled_until_cputime - jiffies) / HZ,
h1);
pr_buf(out, " io");
break;
case REBALANCE_RUNNING:
pr_buf(out, "running\n");
pr_buf(out, "running");
break;
}
pr_newline(out);
}

void bch2_rebalance_stop(struct bch_fs *c)

@ -760,6 +760,8 @@ static int verify_superblock_clean(struct bch_fs *c,
|
||||
{
|
||||
unsigned i;
|
||||
struct bch_sb_field_clean *clean = *cleanp;
|
||||
struct printbuf buf1 = PRINTBUF;
|
||||
struct printbuf buf2 = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
|
||||
@ -772,7 +774,6 @@ static int verify_superblock_clean(struct bch_fs *c,
|
||||
}
|
||||
|
||||
for (i = 0; i < BTREE_ID_NR; i++) {
|
||||
char buf1[200], buf2[200];
|
||||
struct bkey_i *k1, *k2;
|
||||
unsigned l1 = 0, l2 = 0;
|
||||
|
||||
@ -782,6 +783,19 @@ static int verify_superblock_clean(struct bch_fs *c,
|
||||
if (!k1 && !k2)
|
||||
continue;
|
||||
|
||||
printbuf_reset(&buf1);
|
||||
printbuf_reset(&buf2);
|
||||
|
||||
if (k1)
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
|
||||
else
|
||||
pr_buf(&buf1, "(none)");
|
||||
|
||||
if (k2)
|
||||
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
|
||||
else
|
||||
pr_buf(&buf2, "(none)");
|
||||
|
||||
mustfix_fsck_err_on(!k1 || !k2 ||
|
||||
IS_ERR(k1) ||
|
||||
IS_ERR(k2) ||
|
||||
@ -791,10 +805,12 @@ static int verify_superblock_clean(struct bch_fs *c,
|
||||
"superblock btree root %u doesn't match journal after clean shutdown\n"
|
||||
"sb: l=%u %s\n"
|
||||
"journal: l=%u %s\n", i,
|
||||
l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
|
||||
l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
|
||||
l1, buf1.buf,
|
||||
l2, buf2.buf);
|
||||
}
|
||||
fsck_err:
|
||||
printbuf_exit(&buf2);
|
||||
printbuf_exit(&buf1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -997,11 +997,12 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
|
||||
|
||||
if (dflags & ~flags) {
|
||||
if (print) {
|
||||
char buf[100];
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
bch2_replicas_entry_to_text(&PBUF(buf), e);
|
||||
bch2_replicas_entry_to_text(&buf, e);
|
||||
bch_err(c, "insufficient devices online (%u) for replicas entry %s",
|
||||
nr_online, buf);
|
||||
nr_online, buf.buf);
|
||||
printbuf_exit(&buf);
|
||||
}
|
||||
ret = false;
|
||||
break;
|
||||
|
@ -572,16 +572,10 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
|
||||
{
|
||||
u64 offset = opt_get(*opts, sb);
|
||||
struct bch_sb_layout layout;
|
||||
char *_err;
|
||||
struct printbuf err;
|
||||
struct printbuf err = PRINTBUF;
|
||||
__le64 *i;
|
||||
int ret;
|
||||
|
||||
_err = kmalloc(4096, GFP_KERNEL);
|
||||
if (!_err)
|
||||
return -ENOMEM;
|
||||
err = _PBUF(_err, 4096);
|
||||
|
||||
pr_verbose_init(*opts, "");
|
||||
|
||||
memset(sb, 0, sizeof(*sb));
|
||||
@ -633,8 +627,8 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
|
||||
goto err;
|
||||
|
||||
printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
|
||||
path, _err);
|
||||
err = _PBUF(_err, 4096);
|
||||
path, err.buf);
|
||||
printbuf_reset(&err);
|
||||
|
||||
/*
|
||||
* Error reading primary superblock - read location of backup
|
||||
@ -689,16 +683,16 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
|
||||
ret = bch2_sb_validate(sb, &err);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
|
||||
path, _err);
|
||||
path, err.buf);
|
||||
goto err_no_print;
|
||||
}
|
||||
out:
|
||||
pr_verbose_init(*opts, "ret %i", ret);
|
||||
kfree(_err);
|
||||
printbuf_exit(&err);
|
||||
return ret;
|
||||
err:
|
||||
printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
|
||||
path, _err);
|
||||
path, err.buf);
|
||||
err_no_print:
|
||||
bch2_free_super(sb);
|
||||
goto out;
|
||||
@ -768,6 +762,7 @@ int bch2_write_super(struct bch_fs *c)
|
||||
{
|
||||
struct closure *cl = &c->sb_write;
|
||||
struct bch_dev *ca;
|
||||
struct printbuf err = PRINTBUF;
|
||||
unsigned i, sb = 0, nr_wrote;
|
||||
struct bch_devs_mask sb_written;
|
||||
bool wrote, can_mount_without_written, can_mount_with_written;
|
||||
@ -795,18 +790,11 @@ int bch2_write_super(struct bch_fs *c)
|
||||
bch2_sb_from_fs(c, ca);
|
||||
|
||||
for_each_online_member(ca, c, i) {
|
||||
struct printbuf buf = { NULL, NULL };
|
||||
printbuf_reset(&err);
|
||||
|
||||
ret = bch2_sb_validate(&ca->disk_sb, &buf);
|
||||
ret = bch2_sb_validate(&ca->disk_sb, &err);
|
||||
if (ret) {
|
||||
char *_buf = kmalloc(4096, GFP_NOFS);
|
||||
if (_buf) {
|
||||
buf = _PBUF(_buf, 4096);
|
||||
bch2_sb_validate(&ca->disk_sb, &buf);
|
||||
}
|
||||
|
||||
bch2_fs_inconsistent(c, "sb invalid before write: %s", _buf);
|
||||
kfree(_buf);
|
||||
bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
|
||||
percpu_ref_put(&ca->io_ref);
|
||||
goto out;
|
||||
}
|
||||
@ -897,6 +885,7 @@ int bch2_write_super(struct bch_fs *c)
|
||||
out:
|
||||
/* Make new options visible after they're persistent: */
|
||||
bch2_sb_update(c);
|
||||
printbuf_exit(&err);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -870,12 +870,9 @@ noinline_for_stack
|
||||
static void print_mount_opts(struct bch_fs *c)
|
||||
{
|
||||
enum bch_opt_id i;
|
||||
char buf[512];
|
||||
struct printbuf p = PBUF(buf);
|
||||
struct printbuf p = PRINTBUF;
|
||||
bool first = true;
|
||||
|
||||
strcpy(buf, "(null)");
|
||||
|
||||
if (c->opts.read_only) {
|
||||
pr_buf(&p, "ro");
|
||||
first = false;
|
||||
@ -897,7 +894,11 @@ static void print_mount_opts(struct bch_fs *c)
|
||||
bch2_opt_to_text(&p, c, opt, v, OPT_SHOW_MOUNT_STYLE);
|
||||
}
|
||||
|
||||
bch_info(c, "mounted with opts: %s", buf);
|
||||
if (!p.pos)
|
||||
pr_buf(&p, "(null)");
|
||||
|
||||
bch_info(c, "mounted with opts: %s", p.buf);
|
||||
printbuf_exit(&p);
|
||||
}
|
||||
|
||||
int bch2_fs_start(struct bch_fs *c)
|
||||
@ -1561,11 +1562,11 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
|
||||
|
||||
data = bch2_dev_has_data(c, ca);
|
||||
if (data) {
|
||||
char data_has_str[100];
|
||||
struct printbuf data_has = PRINTBUF;
|
||||
|
||||
bch2_flags_to_text(&PBUF(data_has_str),
|
||||
bch2_data_types, data);
|
||||
bch_err(ca, "Remove failed, still has data (%s)", data_has_str);
|
||||
bch2_flags_to_text(&data_has, bch2_data_types, data);
|
||||
bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
|
||||
printbuf_exit(&data_has);
|
||||
ret = -EBUSY;
|
||||
goto err;
|
||||
}
|
||||
@ -1614,16 +1615,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
|
||||
struct bch_sb_field_members *mi;
|
||||
struct bch_member dev_mi;
|
||||
unsigned dev_idx, nr_devices, u64s;
|
||||
char *_errbuf;
|
||||
struct printbuf errbuf;
|
||||
struct printbuf errbuf = PRINTBUF;
|
||||
int ret;
|
||||
|
||||
_errbuf = kmalloc(4096, GFP_KERNEL);
|
||||
if (!_errbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
errbuf = _PBUF(_errbuf, 4096);
|
||||
|
||||
ret = bch2_read_super(path, &opts, &sb);
|
||||
if (ret) {
|
||||
bch_err(c, "device add error: error reading super: %i", ret);
|
||||
@ -1741,7 +1735,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
|
||||
if (ca)
|
||||
bch2_dev_free(ca);
|
||||
bch2_free_super(&sb);
|
||||
kfree(_errbuf);
|
||||
printbuf_exit(&errbuf);
|
||||
return ret;
|
||||
err_late:
|
||||
up_write(&c->state_lock);
|
||||
@ -1906,8 +1900,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
|
||||
struct bch_sb_field_members *mi;
|
||||
unsigned i, best_sb = 0;
|
||||
const char *err;
|
||||
char *_errbuf = NULL;
|
||||
struct printbuf errbuf;
|
||||
struct printbuf errbuf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
if (!try_module_get(THIS_MODULE))
|
||||
@ -1920,14 +1913,6 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
|
||||
goto err;
|
||||
}
|
||||
|
||||
_errbuf = kmalloc(4096, GFP_KERNEL);
|
||||
if (!_errbuf) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
errbuf = _PBUF(_errbuf, 4096);
|
||||
|
||||
sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
|
||||
if (!sb) {
|
||||
ret = -ENOMEM;
|
||||
@ -1991,7 +1976,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
|
||||
}
|
||||
out:
|
||||
kfree(sb);
|
||||
kfree(_errbuf);
|
||||
printbuf_exit(&errbuf);
|
||||
module_put(THIS_MODULE);
|
||||
pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
|
||||
return c;
|
||||
|
@ -46,8 +46,28 @@ struct sysfs_ops type ## _sysfs_ops = { \
|
||||
}
|
||||
|
||||
#define SHOW(fn) \
|
||||
static ssize_t fn ## _to_text(struct printbuf *, \
|
||||
struct kobject *, struct attribute *);\
|
||||
\
|
||||
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
|
||||
char *buf) \
|
||||
{ \
|
||||
struct printbuf out = PRINTBUF; \
|
||||
ssize_t ret = fn ## _to_text(&out, kobj, attr); \
|
||||
\
|
||||
if (!ret && out.allocation_failure) \
|
||||
ret = -ENOMEM; \
|
||||
\
|
||||
if (!ret) { \
|
||||
ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \
|
||||
memcpy(buf, out.buf, ret); \
|
||||
} \
|
||||
printbuf_exit(&out); \
|
||||
return ret; \
|
||||
} \
|
||||
\
|
||||
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
|
||||
struct attribute *attr)
|
||||
|
||||
#define STORE(fn) \
|
||||
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
|
||||
@ -64,22 +84,19 @@ static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
|
||||
#define sysfs_printf(file, fmt, ...) \
|
||||
do { \
|
||||
if (attr == &sysfs_ ## file) \
|
||||
return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
|
||||
pr_buf(out, fmt "\n", __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define sysfs_print(file, var) \
|
||||
do { \
|
||||
if (attr == &sysfs_ ## file) \
|
||||
return snprint(buf, PAGE_SIZE, var); \
|
||||
snprint(out, var); \
|
||||
} while (0)
|
||||
|
||||
#define sysfs_hprint(file, val) \
|
||||
do { \
|
||||
if (attr == &sysfs_ ## file) { \
|
||||
bch2_hprint(&out, val); \
|
||||
pr_buf(&out, "\n"); \
|
||||
return out.pos - buf; \
|
||||
} \
|
||||
if (attr == &sysfs_ ## file) \
|
||||
bch2_hprint(out, val); \
|
||||
} while (0)
|
||||
|
||||
#define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
|
||||
@ -348,7 +365,6 @@ static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
|
||||
SHOW(bch2_fs)
|
||||
{
|
||||
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
|
||||
struct printbuf out = _PBUF(buf, PAGE_SIZE);
|
||||
|
||||
sysfs_print(minor, c->minor);
|
||||
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
|
||||
@ -365,10 +381,8 @@ SHOW(bch2_fs)
|
||||
|
||||
sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
|
||||
|
||||
if (attr == &sysfs_gc_gens_pos) {
|
||||
bch2_gc_gens_pos_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_gc_gens_pos)
|
||||
bch2_gc_gens_pos_to_text(out, c);
|
||||
|
||||
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
|
||||
|
||||
@ -378,83 +392,54 @@ SHOW(bch2_fs)
|
||||
max(0LL, c->copygc_wait -
|
||||
atomic64_read(&c->io_clock[WRITE].now)) << 9);
|
||||
|
||||
if (attr == &sysfs_rebalance_work) {
|
||||
bch2_rebalance_work_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_rebalance_work)
|
||||
bch2_rebalance_work_to_text(out, c);
|
||||
|
||||
sysfs_print(promote_whole_extents, c->promote_whole_extents);
|
||||
|
||||
/* Debugging: */
|
||||
|
||||
if (attr == &sysfs_journal_debug) {
|
||||
bch2_journal_debug_to_text(&out, &c->journal);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_journal_debug)
|
||||
bch2_journal_debug_to_text(out, &c->journal);
|
||||
|
||||
if (attr == &sysfs_journal_pins) {
|
||||
bch2_journal_pins_to_text(&out, &c->journal);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_journal_pins)
|
||||
bch2_journal_pins_to_text(out, &c->journal);
|
||||
|
||||
if (attr == &sysfs_btree_updates) {
|
||||
bch2_btree_updates_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_btree_updates)
|
||||
bch2_btree_updates_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_dirty_btree_nodes) {
|
||||
bch2_dirty_btree_nodes_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_dirty_btree_nodes)
|
||||
bch2_dirty_btree_nodes_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_btree_cache) {
|
||||
bch2_btree_cache_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_btree_cache)
|
||||
bch2_btree_cache_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_btree_key_cache) {
|
||||
bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_btree_key_cache)
|
||||
bch2_btree_key_cache_to_text(out, &c->btree_key_cache);
|
||||
|
||||
if (attr == &sysfs_btree_transactions) {
|
||||
bch2_btree_trans_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_btree_transactions)
|
||||
bch2_btree_trans_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_stripes_heap) {
|
||||
bch2_stripes_heap_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_stripes_heap)
|
||||
bch2_stripes_heap_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_open_buckets) {
|
||||
bch2_open_buckets_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_open_buckets)
|
||||
bch2_open_buckets_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_compression_stats) {
|
||||
bch2_compression_stats_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_compression_stats)
|
||||
bch2_compression_stats_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_new_stripes) {
|
||||
bch2_new_stripes_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_new_stripes)
|
||||
bch2_new_stripes_to_text(out, c);
|
||||
|
||||
if (attr == &sysfs_io_timers_read) {
|
||||
bch2_io_timers_to_text(&out, &c->io_clock[READ]);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_io_timers_write) {
|
||||
bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_io_timers_read)
|
||||
bch2_io_timers_to_text(out, &c->io_clock[READ]);
|
||||
|
||||
if (attr == &sysfs_data_jobs) {
|
||||
data_progress_to_text(&out, c);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_io_timers_write)
|
||||
bch2_io_timers_to_text(out, &c->io_clock[WRITE]);
|
||||
|
||||
if (attr == &sysfs_data_jobs)
|
||||
data_progress_to_text(out, c);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -567,7 +552,7 @@ struct attribute *bch2_fs_files[] = {
|
||||
SHOW(bch2_fs_internal)
|
||||
{
|
||||
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
|
||||
return bch2_fs_show(&c->kobj, attr, buf);
|
||||
return bch2_fs_to_text(out, &c->kobj, attr);
|
||||
}
|
||||
|
||||
STORE(bch2_fs_internal)
|
||||
@ -617,16 +602,15 @@ struct attribute *bch2_fs_internal_files[] = {
|
||||
|
||||
SHOW(bch2_fs_opts_dir)
|
||||
{
|
||||
struct printbuf out = _PBUF(buf, PAGE_SIZE);
|
||||
struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
|
||||
const struct bch_option *opt = container_of(attr, struct bch_option, attr);
|
||||
int id = opt - bch2_opt_table;
|
||||
u64 v = bch2_opt_get_by_id(&c->opts, id);
|
||||
|
||||
bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);
|
||||
pr_buf(&out, "\n");
|
||||
bch2_opt_to_text(out, c, opt, v, OPT_SHOW_FULL_LIST);
|
||||
pr_char(out, '\n');
|
||||
|
||||
return out.pos - buf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
STORE(bch2_fs_opts_dir)
|
||||
@ -690,13 +674,10 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
|
||||
SHOW(bch2_fs_time_stats)
|
||||
{
|
||||
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
|
||||
struct printbuf out = _PBUF(buf, PAGE_SIZE);
|
||||
|
||||
#define x(name) \
|
||||
if (attr == &sysfs_time_stat_##name) { \
|
||||
bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
|
||||
return out.pos - buf; \
|
||||
}
|
||||
if (attr == &sysfs_time_stat_##name) \
|
||||
bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
|
||||
BCH_TIME_STATS()
|
||||
#undef x
|
||||
|
||||
@ -812,7 +793,6 @@ SHOW(bch2_dev)
|
||||
{
|
||||
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
|
||||
struct bch_fs *c = ca->fs;
|
||||
struct printbuf out = _PBUF(buf, PAGE_SIZE);
|
||||
|
||||
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
|
||||
|
||||
@ -825,58 +805,47 @@ SHOW(bch2_dev)
|
||||
if (attr == &sysfs_label) {
|
||||
if (ca->mi.group) {
|
||||
mutex_lock(&c->sb_lock);
|
||||
bch2_disk_path_to_text(&out, c->disk_sb.sb,
|
||||
bch2_disk_path_to_text(out, c->disk_sb.sb,
|
||||
ca->mi.group - 1);
|
||||
mutex_unlock(&c->sb_lock);
|
||||
}
|
||||
|
||||
pr_buf(&out, "\n");
|
||||
return out.pos - buf;
|
||||
pr_char(out, '\n');
|
||||
}
|
||||
|
||||
if (attr == &sysfs_has_data) {
|
||||
bch2_flags_to_text(&out, bch2_data_types,
|
||||
bch2_flags_to_text(out, bch2_data_types,
|
||||
bch2_dev_has_data(c, ca));
|
||||
pr_buf(&out, "\n");
|
||||
return out.pos - buf;
|
||||
pr_char(out, '\n');
|
||||
}
|
||||
|
||||
if (attr == &sysfs_state_rw) {
|
||||
bch2_string_opt_to_text(&out, bch2_member_states,
|
||||
bch2_string_opt_to_text(out, bch2_member_states,
|
||||
ca->mi.state);
|
||||
pr_buf(&out, "\n");
|
||||
return out.pos - buf;
|
||||
pr_char(out, '\n');
|
||||
}
|
||||
|
||||
if (attr == &sysfs_iodone) {
|
||||
dev_iodone_to_text(&out, ca);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_iodone)
|
||||
dev_iodone_to_text(out, ca);
|
||||
|
||||
sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
|
||||
sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
|
||||
|
||||
if (attr == &sysfs_io_latency_stats_read) {
|
||||
bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_io_latency_stats_write) {
|
||||
bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_io_latency_stats_read)
|
||||
bch2_time_stats_to_text(out, &ca->io_latency[READ]);
|
||||
|
||||
if (attr == &sysfs_io_latency_stats_write)
|
||||
bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);
|
||||
|
||||
sysfs_printf(congested, "%u%%",
|
||||
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
|
||||
* 100 / CONGESTED_MAX);
|
||||
|
||||
if (attr == &sysfs_reserve_stats) {
|
||||
reserve_stats_to_text(&out, ca);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_alloc_debug) {
|
||||
dev_alloc_debug_to_text(&out, ca);
|
||||
return out.pos - buf;
|
||||
}
|
||||
if (attr == &sysfs_reserve_stats)
|
||||
reserve_stats_to_text(out, ca);
|
||||
|
||||
if (attr == &sysfs_alloc_debug)
|
||||
dev_alloc_debug_to_text(out, ca);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -871,7 +871,9 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
u64 nr, unsigned nr_threads)
{
struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
char name_buf[20], nr_buf[20], per_sec_buf[20];
char name_buf[20];
struct printbuf nr_buf = PRINTBUF;
struct printbuf per_sec_buf = PRINTBUF;
unsigned i;
u64 time;

@ -932,13 +934,15 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
time = j.finish - j.start;

scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
bch2_hprint(&PBUF(nr_buf), nr);
bch2_hprint(&PBUF(per_sec_buf), div64_u64(nr * NSEC_PER_SEC, time));
bch2_hprint(&nr_buf, nr);
bch2_hprint(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
name_buf, nr_buf, nr_threads,
name_buf, nr_buf.buf, nr_threads,
div_u64(time, NSEC_PER_SEC),
div_u64(time * nr_threads, nr),
per_sec_buf);
per_sec_buf.buf);
printbuf_exit(&per_sec_buf);
printbuf_exit(&nr_buf);
return j.ret;
}

@ -99,6 +99,38 @@ STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)

static int bch2_printbuf_realloc(struct printbuf *out, unsigned extra)
{
unsigned new_size = roundup_pow_of_two(out->size + extra);
char *buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_ATOMIC);

if (!buf) {
out->allocation_failure = true;
return -ENOMEM;
}

out->buf = buf;
out->size = new_size;
return 0;
}

void bch2_pr_buf(struct printbuf *out, const char *fmt, ...)
{
va_list args;
int len;

do {
va_start(args, fmt);
len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args);
va_end(args);
} while (len + 1 >= printbuf_remaining(out) &&
!bch2_printbuf_realloc(out, len + 1));

len = min_t(size_t, len,
printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
out->pos += len;
}
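Worth noting from the hunk above: bch2_printbuf_realloc() falls back to GFP_ATOMIC whenever out->atomic is nonzero, which is why several *_to_text() helpers in this patch bump out->atomic around their spinlocked sections (see the io_clock and journal hunks earlier). A sketch of that convention, with a hypothetical structure standing in for the real ones:

static void foo_stats_to_text(struct printbuf *out, struct foo *f)
{
	out->atomic++;			/* printbuf growth must not sleep here */
	spin_lock(&f->lock);

	pr_buf(out, "count:\t%u\n", f->count);

	spin_unlock(&f->lock);
	--out->atomic;
}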

void bch2_hprint(struct printbuf *buf, s64 v)
{
int u, t = 0;
@ -151,9 +183,6 @@ void bch2_flags_to_text(struct printbuf *out,
unsigned bit, nr = 0;
bool first = true;

if (out->pos != out->end)
*out->pos = '\0';

while (list[nr])
nr++;

@ -242,19 +242,39 @@ enum printbuf_units {
};

struct printbuf {
char *pos;
char *end;
char *last_newline;
char *last_field;
char *buf;
unsigned size;
unsigned pos;
unsigned last_newline;
unsigned last_field;
unsigned indent;
enum printbuf_units units;
unsigned tabstop;
unsigned tabstops[4];
enum printbuf_units units:8;
u8 atomic;
bool allocation_failure:1;
u8 tabstop;
u8 tabstops[4];
};

#define PRINTBUF ((struct printbuf) { NULL })

static inline void printbuf_exit(struct printbuf *buf)
{
kfree(buf->buf);
buf->buf = ERR_PTR(-EINTR); /* poison value */
}

static inline void printbuf_reset(struct printbuf *buf)
{
buf->pos = 0;
buf->last_newline = 0;
buf->last_field = 0;
buf->indent = 0;
buf->tabstop = 0;
}
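Taken together, PRINTBUF, printbuf_reset() and printbuf_exit() give the lifecycle every caller in this patch follows; a small illustrative sketch (the messages are placeholders, and a real caller may also check buf.allocation_failure before trusting buf.buf):

struct printbuf buf = PRINTBUF;		/* no allocation yet */

pr_buf(&buf, "first message");
pr_info("%s", buf.buf);

printbuf_reset(&buf);			/* reuse the same allocation */
pr_buf(&buf, "second message");
pr_info("%s", buf.buf);

printbuf_exit(&buf);			/* required: frees buf.buf */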

static inline size_t printbuf_remaining(struct printbuf *buf)
{
return buf->end - buf->pos;
return buf->size - buf->pos;
}

static inline size_t printbuf_linelen(struct printbuf *buf)
@ -262,29 +282,13 @@ static inline size_t printbuf_linelen(struct printbuf *buf)
return buf->pos - buf->last_newline;
}

#define _PBUF(_buf, _len) \
((struct printbuf) { \
.pos = _buf, \
.end = _buf + _len, \
.last_newline = _buf, \
.last_field = _buf, \
})
void bch2_pr_buf(struct printbuf *out, const char *fmt, ...);

#define PBUF(_buf) _PBUF(_buf, sizeof(_buf))

#define pr_buf(_out, ...) \
do { \
(_out)->pos += scnprintf((_out)->pos, printbuf_remaining(_out), \
__VA_ARGS__); \
} while (0)
#define pr_buf(_out, ...) bch2_pr_buf(_out, __VA_ARGS__)

static inline void pr_char(struct printbuf *out, char c)
{
if (printbuf_remaining(out) > 1) {
*out->pos = c;
out->pos++;
}
bch2_pr_buf(out, "%c", c);
}

static inline void pr_indent_push(struct printbuf *buf, unsigned spaces)
@ -298,7 +302,7 @@ static inline void pr_indent_pop(struct printbuf *buf, unsigned spaces)
{
if (buf->last_newline + buf->indent == buf->pos) {
buf->pos -= spaces;
buf->pos = '\0';
buf->buf[buf->pos] = '\0';
}
buf->indent -= spaces;
}
@ -341,12 +345,12 @@ static inline void pr_tab_rjust(struct printbuf *buf)
BUG_ON(buf->tabstop > ARRAY_SIZE(buf->tabstops));

if (shift > 0) {
memmove(buf->last_field + shift,
buf->last_field,
memmove(buf->buf + buf->last_field + shift,
buf->buf + buf->last_field,
move);
memset(buf->last_field, ' ', shift);
memset(buf->buf + buf->last_field, ' ', shift);
buf->pos += shift;
*buf->pos = 0;
buf->buf[buf->pos] = 0;
}

buf->last_field = buf->pos;
@ -460,8 +464,8 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
_r; \
})

#define snprint(buf, size, var) \
snprintf(buf, size, \
#define snprint(out, var) \
pr_buf(out, \
type_is(var, int) ? "%i\n" \
: type_is(var, unsigned) ? "%u\n" \
: type_is(var, long) ? "%li\n" \
@ -605,10 +609,8 @@ do { \
sysfs_print(name##_rate_d_term, (var)->d_term); \
sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
\
if (attr == &sysfs_##name##_rate_debug) { \
bch2_pd_controller_debug_to_text(&out, var); \
return out.pos - buf; \
} \
if (attr == &sysfs_##name##_rate_debug) \
bch2_pd_controller_debug_to_text(out, var); \
} while (0)

#define sysfs_pd_controller_store(name, var) \

@ -426,9 +426,8 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
bch2_inode_opts_to_opts(bch2_inode_opts_get(&inode->ei_inode));
const struct bch_option *opt;
int id, inode_opt_id;
char buf[512];
struct printbuf out = PBUF(buf);
unsigned val_len;
struct printbuf out = PRINTBUF;
int ret;
u64 v;

id = bch2_opt_lookup(name);
@ -451,14 +450,19 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
v = bch2_opt_get_by_id(&opts, id);
bch2_opt_to_text(&out, c, opt, v, 0);

val_len = out.pos - buf;
ret = out.pos;

if (buffer && val_len > size)
return -ERANGE;
if (out.allocation_failure) {
ret = -ENOMEM;
} else if (buffer) {
if (out.pos > size)
ret = -ERANGE;
else
memcpy(buffer, out.buf, out.pos);
}

if (buffer)
memcpy(buffer, buf, val_len);
return val_len;
printbuf_exit(&out);
return ret;
}

static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler,