bcachefs: New bpos_cmp(), bkey_cmp() replacements

This patch introduces
 - bpos_eq()
 - bpos_lt()
 - bpos_le()
 - bpos_gt()
 - bpos_ge()

and equivalent replacements for bkey_cmp().

Looking at the generated assembly these could probably be improved
further, but we already see a significant code size improvement with
this patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author: Kent Overstreet
Date:   2022-11-24 03:12:22 -05:00
commit e88a75ebe8
parent e153821259
31 changed files with 233 additions and 178 deletions
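
For orientation before the diff: a minimal standalone sketch of what the new predicates compute. The two function bodies are copied from the bkey.h hunk below; the simplified struct bpos and the main() driver are illustrative assumptions, not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct bpos. */
    struct bpos { uint64_t inode, offset, snapshot; };

    /* Branchless equality: OR together the XOR of each field. */
    static inline bool bpos_eq(struct bpos l, struct bpos r)
    {
            return !((l.inode ^ r.inode) |
                     (l.offset ^ r.offset) |
                     (l.snapshot ^ r.snapshot));
    }

    /* Lexicographic less-than over (inode, offset, snapshot). */
    static inline bool bpos_lt(struct bpos l, struct bpos r)
    {
            return l.inode != r.inode ? l.inode < r.inode :
                   l.offset != r.offset ? l.offset < r.offset :
                   l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
    }

    int main(void)
    {
            struct bpos a = { 1, 10, 0 }, b = { 1, 20, 0 };

            /* Old style: bpos_cmp(a, b) < 0. New style: */
            printf("%d %d\n", bpos_lt(a, b), bpos_eq(a, a)); /* prints: 1 1 */
            return 0;
    }

Call sites convert mechanically: bpos_cmp(a, b) < 0 becomes bpos_lt(a, b), !bkey_cmp(a, b) becomes bkey_eq(a, b), and so on throughout the diff below; the bkey_*() variants compare only (inode, offset), ignoring the snapshot field.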

@@ -982,7 +982,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 		goto out;
 	}
 
-	if (bkey_cmp(*discard_pos_done, iter.pos) &&
+	if (!bkey_eq(*discard_pos_done, iter.pos) &&
 	    ca->mi.discard && !c->opts.nochanges) {
 		/*
 		 * This works without any other locks because this is the only

@@ -399,7 +399,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 			   BTREE_ITER_SLOTS, k, ret) {
 		struct bch_alloc_v4 a;
 
-		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
 
 		if (ca->new_fs_bucket_idx &&

@@ -144,6 +144,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
 	return bkey_cmp_left_packed(b, l, &r);
 }
 
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
+	return !((l.inode ^ r.inode) |
+		 (l.offset ^ r.offset) |
+		 (l.snapshot ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode ? l.inode < r.inode :
+	       l.offset != r.offset ? l.offset < r.offset :
+	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode ? l.inode < r.inode :
+	       l.offset != r.offset ? l.offset < r.offset :
+	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+	return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+	return bpos_le(r, l);
+}
+
 static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
 {
 	return cmp_int(l.inode, r.inode) ?:
@@ -151,6 +182,36 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
 		cmp_int(l.snapshot, r.snapshot);
 }
 
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
+	return !((l.inode ^ r.inode) |
+		 (l.offset ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode
+		? l.inode < r.inode
+		: l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode
+		? l.inode < r.inode
+		: l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+	return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+	return bkey_le(r, l);
+}
+
 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 {
 	return cmp_int(l.inode, r.inode) ?:
@@ -159,12 +220,12 @@ static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 
 static inline struct bpos bpos_min(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) < 0 ? l : r;
+	return bpos_lt(l, r) ? l : r;
 }
 
 static inline struct bpos bpos_max(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) > 0 ? l : r;
+	return bpos_gt(l, r) ? l : r;
 }
 
 void bch2_bpos_swab(struct bpos *);

@@ -245,7 +245,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 	}
 
 	if (type != BKEY_TYPE_btree &&
-	    !bkey_cmp(k.k->p, POS_MAX)) {
+	    bkey_eq(k.k->p, POS_MAX)) {
 		prt_printf(err, "key at POS_MAX");
 		return -EINVAL;
 	}
@@ -264,12 +264,12 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
 			    struct printbuf *err)
 {
-	if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
+	if (bpos_lt(k.k->p, b->data->min_key)) {
 		prt_printf(err, "key before start of btree node");
 		return -EINVAL;
 	}
 
-	if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
+	if (bpos_gt(k.k->p, b->data->max_key)) {
 		prt_printf(err, "key past end of btree node");
 		return -EINVAL;
 	}
@@ -279,11 +279,11 @@ int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
 
 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
 {
-	if (!bpos_cmp(pos, POS_MIN))
+	if (bpos_eq(pos, POS_MIN))
 		prt_printf(out, "POS_MIN");
-	else if (!bpos_cmp(pos, POS_MAX))
+	else if (bpos_eq(pos, POS_MAX))
 		prt_printf(out, "POS_MAX");
-	else if (!bpos_cmp(pos, SPOS_MAX))
+	else if (bpos_eq(pos, SPOS_MAX))
 		prt_printf(out, "SPOS_MAX");
 	else {
 		if (pos.inode == U64_MAX)

@@ -60,7 +60,7 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
 {
 	return l->type == r->type &&
 		!bversion_cmp(l->version, r->version) &&
-		!bpos_cmp(l->p, bkey_start_pos(r));
+		bpos_eq(l->p, bkey_start_pos(r));
 }
 
 bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

@@ -83,13 +83,12 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 
 		n = bkey_unpack_key(b, _n);
 
-		if (bpos_cmp(n.p, k.k->p) < 0) {
+		if (bpos_lt(n.p, k.k->p)) {
 			printk(KERN_ERR "Key skipped backwards\n");
 			continue;
 		}
 
-		if (!bkey_deleted(k.k) &&
-		    !bpos_cmp(n.p, k.k->p))
+		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
@@ -530,7 +529,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
 	goto start;
 	while (1) {
 		if (rw_aux_to_bkey(b, t, j) == k) {
-			BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
+			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
 					bkey_unpack_pos(b, k)));
 start:
 			if (++j == t->size)
@@ -1065,7 +1064,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
 	while (l + 1 != r) {
 		unsigned m = (l + r) >> 1;
 
-		if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
 			l = m;
 		else
 			r = m;
@@ -1318,8 +1317,8 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
 	struct bkey_packed *k[MAX_BSETS];
 	unsigned i;
 
-	EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
-	EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
+	EBUG_ON(bpos_lt(*search, b->data->min_key));
+	EBUG_ON(bpos_gt(*search, b->data->max_key));
 
 	bset_aux_tree_verify(b);
 
 	memset(iter, 0, sizeof(*iter));

@@ -793,9 +793,9 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
 {
 	if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
 	    b->c.level != BTREE_NODE_LEVEL(b->data) ||
-	    bpos_cmp(b->data->max_key, b->key.k.p) ||
+	    !bpos_eq(b->data->max_key, b->key.k.p) ||
 	    (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-	     bpos_cmp(b->data->min_key,
+	     !bpos_eq(b->data->min_key,
 		      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
 		btree_bad_header(c, b);
 }

@@ -76,7 +76,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 	if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
 		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
 
-		if (bpos_cmp(expected_start, bp->v.min_key)) {
+		if (!bpos_eq(expected_start, bp->v.min_key)) {
 			bch2_topology_error(c);
 
 			if (bkey_deleted(&prev->k->k)) {
@@ -106,7 +106,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 		}
 	}
 
-	if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+	if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
 		bch2_topology_error(c);
 
 		printbuf_reset(&buf1);
@@ -274,12 +274,12 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 	bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
 
 	if (prev &&
-	    bpos_cmp(expected_start, cur->data->min_key) > 0 &&
+	    bpos_gt(expected_start, cur->data->min_key) &&
 	    BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
 		/* cur overwrites prev: */
 
-		if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
-						 cur->data->min_key) >= 0, c,
+		if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
+						cur->data->min_key), c,
 				"btree node overwritten by next node at btree %s level %u:\n"
 				" node %s\n"
 				" next %s",
@@ -289,7 +289,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 			goto out;
 		}
 
-		if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+		if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
 				bpos_predecessor(cur->data->min_key)), c,
 				"btree node with incorrect max_key at btree %s level %u:\n"
 				" node %s\n"
@@ -301,8 +301,8 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 	} else {
 		/* prev overwrites cur: */
 
-		if (mustfix_fsck_err_on(bpos_cmp(expected_start,
-						 cur->data->max_key) >= 0, c,
+		if (mustfix_fsck_err_on(bpos_ge(expected_start,
+						cur->data->max_key), c,
 				"btree node overwritten by prev node at btree %s level %u:\n"
 				" prev %s\n"
 				" node %s",
@@ -312,7 +312,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 			goto out;
 		}
 
-		if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+		if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
 				"btree node with incorrect min_key at btree %s level %u:\n"
 				" prev %s\n"
 				" node %s",
@@ -336,7 +336,7 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
 	bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
 	bch2_bpos_to_text(&buf2, b->key.k.p);
 
-	if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
+	if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
 			"btree node with incorrect max_key at btree %s level %u:\n"
 			" %s\n"
 			" expected %s",
@@ -374,8 +374,8 @@ static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct
 	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
 		bch2_btree_and_journal_iter_advance(&iter);
 		bch2_bkey_buf_reassemble(&cur_k, c, k);
@@ -912,8 +912,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 	bkey_init(&prev.k->k);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
 		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
 				       false, &k, true);
@@ -1018,7 +1018,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 		six_lock_read(&b->c.lock, NULL, NULL);
 		printbuf_reset(&buf);
 		bch2_bpos_to_text(&buf, b->data->min_key);
-		if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+		if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
 				"btree root with incorrect min_key: %s", buf.buf)) {
 			bch_err(c, "repair unimplemented");
 			ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1027,7 +1027,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 
 		printbuf_reset(&buf);
 		bch2_bpos_to_text(&buf, b->data->max_key);
-		if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
+		if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
				"btree root with incorrect max_key: %s", buf.buf)) {
 			bch_err(c, "repair unimplemented");
 			ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1341,7 +1341,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	enum bch_data_type type;
 	int ret;
 
-	if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+	if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
 		return 1;
 
 	bch2_alloc_to_v4(k, &old);

@@ -77,7 +77,7 @@ static void verify_no_dups(struct btree *b,
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
 
-		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
+		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
 	}
 #endif
 }
@@ -645,8 +645,8 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 	bch2_btree_build_aux_trees(b);
 
 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 	}
 }
@@ -744,7 +744,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			b->data->max_key = b->key.k.p;
 		}
 
-		btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
+		btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
 			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
 			     "incorrect min_key: got %s should be %s",
 			     (printbuf_reset(&buf1),
@@ -753,7 +753,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
 	}
 
-	btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
+	btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
		     "incorrect max key %s",
		     (printbuf_reset(&buf1),

@@ -201,7 +201,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 {
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    write)
 		bn->min_key = bpos_nosnap_predecessor(bn->min_key);
@@ -218,7 +218,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    !write)
 		bn->min_key = bpos_nosnap_successor(bn->min_key);
 }

@@ -93,7 +93,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 	struct bpos pos = iter->pos;
 
 	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
-	    bkey_cmp(pos, POS_MAX))
+	    !bkey_eq(pos, POS_MAX))
 		pos = bkey_successor(iter, pos);
 	return pos;
 }
@@ -101,13 +101,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 static inline bool btree_path_pos_before_node(struct btree_path *path,
 					      struct btree *b)
 {
-	return bpos_cmp(path->pos, b->data->min_key) < 0;
+	return bpos_lt(path->pos, b->data->min_key);
 }
 
 static inline bool btree_path_pos_after_node(struct btree_path *path,
 					     struct btree *b)
 {
-	return bpos_cmp(b->key.k.p, path->pos) < 0;
+	return bpos_gt(path->pos, b->key.k.p);
 }
 
 static inline bool btree_path_pos_in_node(struct btree_path *path,
@@ -133,7 +133,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
 	ck = (void *) path->l[0].b;
 	BUG_ON(ck->key.btree_id != path->btree_id ||
-	       bkey_cmp(ck->key.pos, path->pos));
+	       !bkey_eq(ck->key.pos, path->pos));
 
 	if (!locked)
 		btree_node_unlock(trans, path, 0);
@@ -278,8 +278,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
 	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       iter->pos.snapshot != iter->snapshot);
 
-	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
-	       bkey_cmp(iter->pos, iter->k.p) > 0);
+	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+	       bkey_gt(iter->pos, iter->k.p));
 }
 
 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
@@ -313,7 +313,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
 	if (ret)
 		goto out;
 
-	if (!bkey_cmp(prev.k->p, k.k->p) &&
+	if (bkey_eq(prev.k->p, k.k->p) &&
 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
 				      prev.k->p.snapshot) > 0) {
 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
@@ -355,11 +355,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
 			continue;
 
 		if (!key_cache) {
-			if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
-			    bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+			    bkey_le(pos, path->l[0].b->key.k.p))
 				return;
 		} else {
-			if (!bkey_cmp(pos, path->pos))
+			if (bkey_eq(pos, path->pos))
 				return;
 		}
 	}
@@ -1571,16 +1571,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
 
-		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
 
-		if (!k.k || bpos_cmp(path->pos, k.k->p))
+		if (!k.k || !bpos_eq(path->pos, k.k->p))
 			goto hole;
 	} else {
 		struct bkey_cached *ck = (void *) path->l[0].b;
 
 		EBUG_ON(ck &&
 			(path->btree_id != ck->key.btree_id ||
-			 bkey_cmp(path->pos, ck->key.pos)));
+			 !bkey_eq(path->pos, ck->key.pos)));
 		EBUG_ON(!ck || !ck->valid);
 
 		*u = ck->k->k;
@@ -1638,7 +1638,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	if (!b)
 		goto out;
 
-	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
 
 	bkey_init(&iter->k);
 	iter->k.p = iter->pos = b->key.k.p;
@@ -1689,7 +1689,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 
 	b = btree_path_node(path, path->level + 1);
 
-	if (!bpos_cmp(iter->pos, b->key.k.p)) {
+	if (bpos_eq(iter->pos, b->key.k.p)) {
 		__btree_path_set_level_up(trans, path, path->level++);
 	} else {
 		/*
@@ -1732,9 +1732,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
 	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
 		struct bpos pos = iter->k.p;
-		bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-			    ? bpos_cmp(pos, SPOS_MAX)
-			    : bkey_cmp(pos, SPOS_MAX)) != 0;
+		bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+			     ? bpos_eq(pos, SPOS_MAX)
+			     : bkey_eq(pos, SPOS_MAX));
 
 		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 			pos = bkey_successor(iter, pos);
@@ -1752,9 +1752,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
 	struct bpos pos = bkey_start_pos(&iter->k);
-	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-		    ? bpos_cmp(pos, POS_MIN)
-		    : bkey_cmp(pos, POS_MIN)) != 0;
+	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+		     ? bpos_eq(pos, POS_MIN)
+		     : bkey_eq(pos, POS_MIN));
 
 	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 		pos = bkey_predecessor(iter, pos);
@@ -1773,11 +1773,11 @@ struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
 			continue;
 		if (i->btree_id > iter->btree_id)
 			break;
-		if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
+		if (bpos_lt(i->k->k.p, iter->path->pos))
 			continue;
 		if (i->key_cache_already_flushed)
 			continue;
-		if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+		if (!ret || bpos_lt(i->k->k.p, ret->k.p))
 			ret = i->k;
 	}
@@ -1797,7 +1797,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
 {
 	struct bkey_i *k;
 
-	if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0)
+	if (bpos_lt(iter->path->pos, iter->journal_pos))
 		iter->journal_idx = 0;
 
 	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
@@ -1936,8 +1936,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 		next_update = btree_trans_peek_updates(iter);
 
 		if (next_update &&
-		    bpos_cmp(next_update->k.p,
-			     k.k ? k.k->p : l->b->key.k.p) <= 0) {
+		    bpos_le(next_update->k.p,
+			    k.k ? k.k->p : l->b->key.k.p)) {
 			iter->k = next_update->k;
 			k = bkey_i_to_s_c(next_update);
 		}
@@ -1950,7 +1950,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			 * whiteout, with a real key at the same position, since
 			 * in the btree deleted keys sort before non deleted.
 			 */
-			search_key = bpos_cmp(search_key, k.k->p)
+			search_key = !bpos_eq(search_key, k.k->p)
 				? k.k->p
 				: bpos_successor(k.k->p);
 			continue;
@@ -1958,7 +1958,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 
 		if (likely(k.k)) {
 			break;
-		} else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
 			/* Advance to next leaf node: */
 			search_key = bpos_successor(l->b->key.k.p);
 		} else {
@@ -2008,19 +2008,19 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 		 */
 		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
 			iter_pos = k.k->p;
-		else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+		else if (bkey_gt(bkey_start_pos(k.k), iter->pos))
 			iter_pos = bkey_start_pos(k.k);
 		else
 			iter_pos = iter->pos;
 
-		if (bkey_cmp(iter_pos, end) > 0) {
+		if (bkey_gt(iter_pos, end)) {
 			bch2_btree_iter_set_pos(iter, end);
 			k = bkey_s_c_null;
 			goto out_no_locked;
 		}
 
 		if (iter->update_path &&
-		    bkey_cmp(iter->update_path->pos, k.k->p)) {
+		    !bkey_eq(iter->update_path->pos, k.k->p)) {
 			bch2_path_put_nokeep(trans, iter->update_path,
 					     iter->flags & BTREE_ITER_INTENT);
 			iter->update_path = NULL;
@@ -2143,7 +2143,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		/* Check if we should go up to the parent node: */
 		if (!k.k ||
 		    (iter->advanced &&
-		     !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+		     bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
 			iter->pos = path_l(iter->path)->b->key.k.p;
 			btree_path_set_level_up(trans, iter->path);
 			iter->advanced = false;
@@ -2159,7 +2159,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		if (iter->path->level != iter->min_depth &&
 		    (iter->advanced ||
 		     !k.k ||
-		     bpos_cmp(iter->pos, k.k->p))) {
+		     !bpos_eq(iter->pos, k.k->p))) {
 			btree_path_set_level_down(trans, iter->path, iter->min_depth);
 			iter->pos = bpos_successor(iter->pos);
 			iter->advanced = false;
@@ -2170,7 +2170,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		if (iter->path->level == iter->min_depth &&
 		    iter->advanced &&
 		    k.k &&
-		    !bpos_cmp(iter->pos, k.k->p)) {
+		    bpos_eq(iter->pos, k.k->p)) {
 			iter->pos = bpos_successor(iter->pos);
 			iter->advanced = false;
 			continue;
@@ -2178,7 +2178,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 
 		if (iter->advanced &&
 		    iter->path->level == iter->min_depth &&
-		    bpos_cmp(k.k->p, iter->pos))
+		    !bpos_eq(k.k->p, iter->pos))
 			iter->advanced = false;
 
 		BUG_ON(iter->advanced);
@@ -2248,8 +2248,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 					  &iter->path->l[0], &iter->k);
 		if (!k.k ||
 		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
-		     ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
-		     : bpos_cmp(k.k->p, search_key) > 0))
+		     ? bpos_ge(bkey_start_pos(k.k), search_key)
+		     : bpos_gt(k.k->p, search_key)))
 			k = btree_path_level_prev(trans, iter->path,
 						  &iter->path->l[0], &iter->k);
@@ -2263,7 +2263,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 				 * longer at the same _key_ (not pos), return
 				 * that candidate
 				 */
-				if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
 					bch2_path_put_nokeep(trans, iter->path,
 						      iter->flags & BTREE_ITER_INTENT);
 					iter->path = saved_path;
@@ -2298,7 +2298,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 			}
 
 			break;
-		} else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+		} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
 			/* Advance to previous leaf node: */
 			search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
 		} else {
@@ -2309,10 +2309,10 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 		}
 	}
 
-	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
 
 	/* Extents can straddle iter->pos: */
-	if (bkey_cmp(k.k->p, iter->pos) < 0)
+	if (bkey_lt(k.k->p, iter->pos))
 		iter->pos = k.k->p;
 
 	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
@@ -2377,7 +2377,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		struct bkey_i *next_update;
 
 		if ((next_update = btree_trans_peek_updates(iter)) &&
-		    !bpos_cmp(next_update->k.p, iter->pos)) {
+		    bpos_eq(next_update->k.p, iter->pos)) {
 			iter->k = next_update->k;
 			k = bkey_i_to_s_c(next_update);
 			goto out;
@@ -2433,7 +2433,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
 
-		if (bkey_cmp(iter->pos, next) < 0) {
+		if (bkey_lt(iter->pos, next)) {
 			bkey_init(&iter->k);
 			iter->k.p = iter->pos;

@@ -478,7 +478,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 	if (!(flags & BTREE_ITER_SLOTS))
 		return bch2_btree_iter_peek_upto(iter, end);
 
-	if (bkey_cmp(iter->pos, end) > 0)
+	if (bkey_gt(iter->pos, end))
 		return bkey_s_c_null;
 
 	return bch2_btree_iter_peek_slot(iter);

@@ -27,8 +27,8 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
 	const struct bkey_cached *ck = obj;
 	const struct bkey_cached_key *key = arg->key;
 
-	return cmp_int(ck->key.btree_id, key->btree_id) ?:
-		bpos_cmp(ck->key.pos, key->pos);
+	return ck->key.btree_id != key->btree_id ||
+		!bpos_eq(ck->key.pos, key->pos);
 }
 
 static const struct rhashtable_params bch2_btree_key_cache_params = {
@@ -476,7 +476,7 @@ bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree
 		BUG_ON(ret);
 
 		if (ck->key.btree_id != path->btree_id ||
-		    bpos_cmp(ck->key.pos, path->pos)) {
+		    !bpos_eq(ck->key.pos, path->pos)) {
 			six_unlock_type(&ck->c.lock, lock_want);
 			goto retry;
 		}
@@ -550,7 +550,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 		return ret;
 
 	if (ck->key.btree_id != path->btree_id ||
-	    bpos_cmp(ck->key.pos, path->pos)) {
+	    !bpos_eq(ck->key.pos, path->pos)) {
 		six_unlock_type(&ck->c.lock, lock_want);
 		goto retry;
 	}

@@ -71,7 +71,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 			break;
 		bp = bkey_s_c_to_btree_ptr_v2(k);
 
-		if (bpos_cmp(next_node, bp.v->min_key)) {
+		if (!bpos_eq(next_node, bp.v->min_key)) {
 			bch2_dump_btree_node(c, b);
 			bch2_bpos_to_text(&buf1, next_node);
 			bch2_bpos_to_text(&buf2, bp.v->min_key);
@@ -81,7 +81,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 		bch2_btree_node_iter_advance(&iter, b);
 
 		if (bch2_btree_node_iter_end(&iter)) {
-			if (bpos_cmp(k.k->p, b->key.k.p)) {
+			if (!bpos_eq(k.k->p, b->key.k.p)) {
 				bch2_dump_btree_node(c, b);
 				bch2_bpos_to_text(&buf1, b->key.k.p);
 				bch2_bpos_to_text(&buf2, k.k->p);
@@ -1328,7 +1328,7 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
 	while (!bch2_keylist_empty(keys)) {
 		struct bkey_i *k = bch2_keylist_front(keys);
 
-		if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+		if (bpos_gt(k->k.p, b->key.k.p))
 			break;
 
 		bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
@@ -1445,8 +1445,7 @@ static void btree_split_insert_keys(struct btree_update *as,
 				    struct keylist *keys)
 {
 	if (!bch2_keylist_empty(keys) &&
-	    bpos_cmp(bch2_keylist_front(keys)->k.p,
-		     b->data->max_key) <= 0) {
+	    bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
 		struct btree_node_iter node_iter;
 
 		bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
@@ -1770,8 +1769,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 
 	b = path->l[level].b;
 
-	if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
-	    (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+	if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+	    (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
 		b->sib_u64s[sib] = U16_MAX;
 		return 0;
 	}
@@ -1804,7 +1803,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 		next = m;
 	}
 
-	if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+	if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
 
 		bch2_bpos_to_text(&buf1, prev->data->max_key);
@@ -2097,7 +2096,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 					  iter2.flags & BTREE_ITER_INTENT);
 
 		BUG_ON(iter2.path->level != b->c.level);
-		BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+		BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
 
 		btree_path_set_level_up(trans, iter2.path);

@@ -92,8 +92,8 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
 	EBUG_ON(btree_node_just_written(b));
 	EBUG_ON(bset_written(b, btree_bset_last(b)));
 	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
-	EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
-	EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
+	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
+	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
 	EBUG_ON(insert->k.u64s >
 		bch_btree_keys_u64s_remaining(trans->c, b));
@@ -257,7 +257,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 static inline void btree_insert_entry_checks(struct btree_trans *trans,
 					     struct btree_insert_entry *i)
 {
-	BUG_ON(bpos_cmp(i->k->k.p, i->path->pos));
+	BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
 	BUG_ON(i->cached != i->path->cached);
 	BUG_ON(i->level != i->path->level);
 	BUG_ON(i->btree_id != i->path->btree_id);
@@ -1141,7 +1141,7 @@ static noinline int __check_pos_snapshot_overwritten(struct btree_trans *trans,
 		if (!k.k)
 			break;
 
-		if (bkey_cmp(pos, k.k->p))
+		if (!bkey_eq(pos, k.k->p))
 			break;
 
 		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
@@ -1242,7 +1242,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 	if (!k.k)
 		goto out;
 
-	if (!bkey_cmp(k.k->p, bkey_start_pos(&insert->k))) {
+	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
 		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
 			ret = extent_front_merge(trans, &iter, k, &insert, flags);
 			if (ret)
@@ -1252,9 +1252,9 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 		goto next;
 	}
 
-	while (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) > 0) {
-		bool front_split = bkey_cmp(bkey_start_pos(k.k), start) < 0;
-		bool back_split = bkey_cmp(k.k->p, insert->k.p) > 0;
+	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+		bool front_split = bkey_lt(bkey_start_pos(k.k), start);
+		bool back_split = bkey_gt(k.k->p, insert->k.p);
 
 		/*
 		 * If we're going to be splitting a compressed extent, note it
@@ -1313,7 +1313,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 				goto err;
 		}
 
-		if (bkey_cmp(k.k->p, insert->k.p) <= 0) {
+		if (bkey_le(k.k->p, insert->k.p)) {
 			update = bch2_trans_kmalloc(trans, sizeof(*update));
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
@@ -1407,7 +1407,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, btree_id, pos,
 				     BTREE_ITER_ALL_SNAPSHOTS|
 				     BTREE_ITER_NOPRESERVE, k, ret) {
-		if (bkey_cmp(k.k->p, pos))
+		if (!bkey_eq(k.k->p, pos))
 			break;
 
 		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
@@ -1463,7 +1463,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
 	EBUG_ON(!path->should_be_locked);
 	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
-	EBUG_ON(bpos_cmp(k->k.p, path->pos));
+	EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
 	n = (struct btree_insert_entry) {
 		.flags = flags,
@@ -1573,7 +1573,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 	    btree_id_cached(trans->c, path->btree_id)) {
 		if (!iter->key_cache_path ||
 		    !iter->key_cache_path->should_be_locked ||
-		    bpos_cmp(iter->key_cache_path->pos, k->k.p)) {
+		    !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
 			if (!iter->key_cache_path)
 				iter->key_cache_path =
 					bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
@@ -1682,7 +1682,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 		if (ret)
 			goto err;
 
-		if (bkey_cmp(iter.pos, end) >= 0)
+		if (bkey_ge(iter.pos, end))
 			break;
 
 		bkey_init(&delete.k);

@@ -30,7 +30,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 
 	darray_init(&s);
 
-	if (!bkey_cmp(old_pos, new_pos))
+	if (bkey_eq(old_pos, new_pos))
 		return 0;
 
 	if (!snapshot_t(c, old_pos.snapshot)->children[0])
@@ -45,7 +45,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 		if (ret)
 			break;
 
-		if (bkey_cmp(old_pos, k.k->p))
+		if (!bkey_eq(old_pos, k.k->p))
 			break;
 
 		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
@@ -244,7 +244,7 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 		if (ret)
 			break;
 next:
-		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
 			bch2_keylist_pop_front(keys);
 			if (bch2_keylist_empty(keys))
 				goto out;

@@ -306,7 +306,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 
-	if (!bpos_cmp(SPOS_MAX, i->from))
+	if (bpos_eq(SPOS_MAX, i->from))
 		return i->ret;
 
 	bch2_trans_init(&trans, i->c, 0, 0);
@@ -317,7 +317,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
 			break;
 
 		bch2_btree_node_to_text(&i->buf, i->c, b);
-		i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
+		i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
 			? bpos_successor(b->key.k.p)
 			: b->key.k.p;
 	}
@@ -368,7 +368,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 		if (ret)
 			break;
 
-		if (bpos_cmp(l->b->key.k.p, i->prev_node) > 0) {
+		if (bpos_gt(l->b->key.k.p, i->prev_node)) {
 			bch2_btree_node_to_text(&i->buf, i->c, l->b);
 			i->prev_node = l->b->key.k.p;
 		}

@@ -350,8 +350,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
 			bkey_init(&new_src->k);
 			new_src->k.p = src_iter.pos;
 
-			if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
-			    bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
+			if (bkey_le(dst_pos, src_iter.pos) &&
+			    bkey_lt(src_iter.pos, dst_iter.pos)) {
 				/*
 				 * We have a hash collision for the new dst key,
 				 * and new_src - the key we're deleting - is between

@@ -107,7 +107,7 @@ int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
 {
 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
 
-	if (!bkey_cmp(k.k->p, POS_MIN)) {
+	if (bkey_eq(k.k->p, POS_MIN)) {
 		prt_printf(err, "stripe at POS_MIN");
 		return -EINVAL;
 	}
@@ -724,7 +724,7 @@ static int ec_stripe_bkey_insert(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
+		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
 			if (start_pos.offset) {
 				start_pos = min_pos;
 				bch2_btree_iter_set_pos(&iter, start_pos);

@@ -73,8 +73,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
 		for_each_btree_key_norestart(trans, iter,
 				   BTREE_ID_reflink, POS(0, idx + offset),
 				   BTREE_ITER_SLOTS, r_k, ret2) {
-			if (bkey_cmp(bkey_start_pos(r_k.k),
-				     POS(0, idx + sectors)) >= 0)
+			if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
 				break;
 
 			/* extent_update_to_keys(), for the reflink_v update */
@@ -132,11 +131,10 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
 	for_each_btree_key_continue_norestart(copy, 0, k, ret) {
 		unsigned offset = 0;
 
-		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), *end))
 			break;
 
-		if (bkey_cmp(bkey_start_pos(&insert->k),
-			     bkey_start_pos(k.k)) > 0)
+		if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
 			offset = bkey_start_offset(&insert->k) -
 				bkey_start_offset(k.k);

@@ -227,7 +227,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bkey_cmp(bp.v->min_key, POS_MIN))
+	    !bkey_eq(bp.v->min_key, POS_MIN))
 		bp.v->min_key = write
 			? bpos_nosnap_predecessor(bp.v->min_key)
 			: bpos_nosnap_successor(bp.v->min_key);
@@ -1211,10 +1211,10 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k)
 	int val_u64s_delta;
 	u64 sub;
 
-	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+	if (bkey_le(where, bkey_start_pos(k.k)))
 		return 0;
 
-	EBUG_ON(bkey_cmp(where, k.k->p) > 0);
+	EBUG_ON(bkey_gt(where, k.k->p));
 
 	sub = where.offset - bkey_start_offset(k.k);
@@ -1291,10 +1291,10 @@ int bch2_cut_back_s(struct bpos where, struct bkey_s k)
 	int val_u64s_delta;
 	u64 len = 0;
 
-	if (bkey_cmp(where, k.k->p) >= 0)
+	if (bkey_ge(where, k.k->p))
 		return 0;
 
-	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
+	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
 
 	len = where.offset - bkey_start_offset(k.k);

@@ -636,9 +636,8 @@ enum bch_extent_overlap {
 static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
 							   const struct bkey *m)
 {
-	int cmp1 = bkey_cmp(k->p, m->p) < 0;
-	int cmp2 = bkey_cmp(bkey_start_pos(k),
-			    bkey_start_pos(m)) > 0;
+	int cmp1 = bkey_lt(k->p, m->p);
+	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
 
 	return (cmp1 << 1) + cmp2;
 }
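
An aside on the two-bit trick above, as a sketch: cmp1 says whether k ends before m ends, cmp2 whether k starts after m starts, and (cmp1 << 1) + cmp2 indexes the overlap mode directly. The enum values quoted here are an assumption taken from the surrounding extents.h context, so verify against the tree:

    /* Assumed values of enum bch_extent_overlap (see extents.h):
     *
     *  cmp1 cmp2  (cmp1 << 1) + cmp2   meaning (how k overlaps m)
     *   0    0    0 = ..._ALL          k covers all of m
     *   0    1    1 = ..._BACK         k covers the back of m
     *   1    0    2 = ..._FRONT        k covers the front of m
     *   1    1    3 = ..._MIDDLE       k sits strictly inside m
     */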

@@ -2043,7 +2043,7 @@ static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
 	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
 			   SPOS(inum.inum, offset, snapshot),
 			   BTREE_ITER_SLOTS, k, err) {
-		if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
 			break;
 
 		if (k.k->p.snapshot != snapshot ||
@@ -2532,7 +2532,7 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol,
 		goto err;
 
 	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
-		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
 		if (bkey_extent_is_data(k.k)) {
@@ -2970,13 +2970,13 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 			break;
 
 		if (insert &&
-		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+		    bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
 			break;
reassemble:
 		bch2_bkey_buf_reassemble(&copy, c, k);
 
 		if (insert &&
-		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
+		    bkey_lt(bkey_start_pos(k.k), move_pos))
 			bch2_cut_front(move_pos, copy.k);
 
 		copy.k->k.p.offset += shift >> 9;
@@ -2986,7 +2986,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 		if (ret)
 			continue;
 
-		if (bkey_cmp(atomic_end, copy.k->k.p)) {
+		if (!bkey_eq(atomic_end, copy.k->k.p)) {
 			if (insert) {
 				move_pos = atomic_end;
 				move_pos.offset -= shift >> 9;
@@ -3064,7 +3064,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 			POS(inode->v.i_ino, start_sector),
 			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
-	while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
+	while (!ret && bkey_lt(iter.pos, end_pos)) {
 		s64 i_sectors_delta = 0;
 		struct disk_reservation disk_res = { 0 };
 		struct quota_res quota_res = { 0 };

@@ -133,7 +133,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
 	if (ret)
 		goto err;
 
-	if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
+	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
 		ret = -ENOENT;
 		goto err;
 	}
@@ -527,7 +527,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
 	};
 	int ret = 0;
 
-	if (bkey_cmp(s->pos, pos))
+	if (!bkey_eq(s->pos, pos))
 		s->ids.nr = 0;
 
 	pos.snapshot = n.equiv;
@@ -825,7 +825,7 @@ static int hash_check_key(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, desc.btree_id,
 				     POS(hash_k.k->p.inode, hash),
 				     BTREE_ITER_SLOTS, k, ret) {
-		if (!bkey_cmp(k.k->p, hash_k.k->p))
+		if (bkey_eq(k.k->p, hash_k.k->p))
 			break;
 
 		if (fsck_err_on(k.k->type == desc.key_type &&
@@ -1199,7 +1199,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 	BUG_ON(!iter->path->should_be_locked);
#if 0
-	if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+	if (bkey_gt(prev.k->k.p, bkey_start_pos(k.k))) {
 		char buf1[200];
 		char buf2[200];

@@ -543,7 +543,7 @@ int bch2_inode_create(struct btree_trans *trans,
again:
 	while ((k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k)) &&
-	       bkey_cmp(k.k->p, POS(0, max)) < 0) {
+	       bkey_lt(k.k->p, POS(0, max))) {
 		if (pos < iter->pos.offset)
 			goto found_slot;

@@ -237,7 +237,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
 		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
 			*usage_increasing = true;
 
-		if (bkey_cmp(old.k->p, new->k.p) >= 0) {
+		if (bkey_ge(old.k->p, new->k.p)) {
 			/*
 			 * Check if there's already data above where we're
 			 * going to be writing to - this means we're definitely
@@ -420,7 +420,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_btree_iter_set_snapshot(iter, snapshot);
 
 		k = bch2_btree_iter_peek(iter);
-		if (bkey_cmp(iter->pos, end_pos) >= 0) {
+		if (bkey_ge(iter->pos, end_pos)) {
 			bch2_btree_iter_set_pos(iter, end_pos);
 			break;
 		}
@@ -518,7 +518,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
 		if (ec_ob)
 			bch2_ob_add_backpointer(c, ec_ob, &sk.k->k);
 
-		if (bkey_cmp(iter.pos, k->k.p) >= 0)
+		if (bkey_ge(iter.pos, k->k.p))
 			bch2_keylist_pop_front(&op->insert_keys);
 		else
 			bch2_cut_front(iter.pos, k);
@@ -1398,7 +1398,7 @@ void bch2_write(struct closure *cl)
 	EBUG_ON(op->cl.parent);
 	BUG_ON(!op->nr_replicas);
 	BUG_ON(!op->write_point.v);
-	BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+	BUG_ON(bkey_eq(op->pos, POS_MAX));
 
 	op->start_time = local_clock();
 	bch2_keylist_init(&op->insert_keys, op->inline_keys);

@@ -36,7 +36,7 @@ void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
 	struct bkey_i *where;
 
 	for_each_keylist_key(l, where)
-		if (bkey_cmp(insert->k.p, where->k.p) < 0)
+		if (bpos_lt(insert->k.p, where->k.p))
 			break;
 
 	memmove_u64s_up((u64 *) where + insert->k.u64s,
@@ -63,6 +63,6 @@ void bch2_verify_keylist_sorted(struct keylist *l)
 
 	for_each_keylist_key(l, k)
 		BUG_ON(bkey_next(k) != l->top &&
-		       bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
+		       bpos_ge(k->k.p, bkey_next(k)->k.p));
 }
#endif

@@ -340,7 +340,7 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos,
 	if (ret)
 		goto err;
 
-	if (!k.k || bkey_cmp(k.k->p, pos)) {
+	if (!k.k || !bkey_eq(k.k->p, pos)) {
 		ret = -ENOENT;
 		goto err;
 	}
@@ -446,7 +446,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ret)
 			break;
 
-		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
 		ctxt->stats->pos = iter.pos;

@@ -132,9 +132,8 @@ struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree
 	       (k = idx_to_key(keys, *idx),
 		k->btree_id == btree_id &&
 		k->level == level &&
-		bpos_cmp(k->k->k.p, end_pos) <= 0)) {
-		if (bpos_cmp(k->k->k.p, pos) >= 0 &&
-		    !k->overwritten)
+		bpos_le(k->k->k.p, end_pos))) {
+		if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
 			return k->k;
 
 		(*idx)++;
@@ -295,7 +294,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
 	if (idx < keys->size &&
 	    keys->d[idx].btree_id == btree &&
 	    keys->d[idx].level == level &&
-	    !bpos_cmp(keys->d[idx].k->k.p, pos))
+	    bpos_eq(keys->d[idx].k->k.p, pos))
 		keys->d[idx].overwritten = true;
 }
@@ -354,7 +353,7 @@ static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
 
 void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
 {
-	if (!bpos_cmp(iter->pos, SPOS_MAX))
+	if (bpos_eq(iter->pos, SPOS_MAX))
 		iter->at_end = true;
 	else
 		iter->pos = bpos_successor(iter->pos);
@@ -368,19 +367,19 @@ struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *
 		return bkey_s_c_null;
 
 	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
-	       bpos_cmp(btree_k.k->p, iter->pos) < 0)
+	       bpos_lt(btree_k.k->p, iter->pos))
 		bch2_journal_iter_advance_btree(iter);
 
 	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
-	       bpos_cmp(journal_k.k->p, iter->pos) < 0)
+	       bpos_lt(journal_k.k->p, iter->pos))
 		bch2_journal_iter_advance(&iter->journal);
 
 	ret = journal_k.k &&
-		(!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
+		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
 		? journal_k
 		: btree_k;
 
-	if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
+	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
 		ret = bkey_s_c_null;
 
 	if (ret.k) {
@@ -528,7 +527,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		while (src + 1 < keys->d + keys->nr &&
 		       src[0].btree_id == src[1].btree_id &&
 		       src[0].level == src[1].level &&
-		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
+		       bpos_eq(src[0].k->k.p, src[1].k->k.p))
 			src++;
 
 		*dst++ = *src++;

@@ -252,14 +252,14 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 	int ret;
 
 	for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
-		if (bkey_cmp(iter->pos, end) >= 0)
+		if (bkey_ge(iter->pos, end))
 			break;
 
 		if (bkey_extent_is_data(k.k))
 			return k;
 	}
 
-	if (bkey_cmp(iter->pos, end) >= 0)
+	if (bkey_ge(iter->pos, end))
 		bch2_btree_iter_set_pos(iter, end);
 	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
 }
@@ -301,7 +301,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 	while ((ret == 0 ||
 		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
-	       bkey_cmp(dst_iter.pos, dst_end) < 0) {
+	       bkey_lt(dst_iter.pos, dst_end)) {
 		struct disk_reservation disk_res = { 0 };
 
 		bch2_trans_begin(&trans);
@@ -334,7 +334,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 		if (ret)
 			continue;
 
-		if (bkey_cmp(src_want, src_iter.pos) < 0) {
+		if (bkey_lt(src_want, src_iter.pos)) {
 			ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum,
 					     min(dst_end.offset,
 						 dst_iter.pos.offset +
@@ -386,8 +386,8 @@ s64 bch2_remap_range(struct bch_fs *c,
 	bch2_trans_iter_exit(&trans, &dst_iter);
 	bch2_trans_iter_exit(&trans, &src_iter);
 
-	BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
-	BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
+	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
+	BUG_ON(bkey_gt(dst_iter.pos, dst_end));
 
 	dst_done = dst_iter.pos.offset - dst_start.offset;
 	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);

@@ -30,8 +30,8 @@ int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
 	struct bkey_s_c_snapshot s;
 	u32 i, id;
 
-	if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
-	    bkey_cmp(k.k->p, POS(0, 1)) < 0) {
+	if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+	    bkey_lt(k.k->p, POS(0, 1))) {
 		prt_printf(err, "bad pos");
 		return -EINVAL;
 	}
@@ -592,7 +592,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
 
-	if (bkey_cmp(k.k->p, *last_pos))
+	if (!bkey_eq(k.k->p, *last_pos))
 		equiv_seen->nr = 0;
 	*last_pos = k.k->p;
@@ -770,8 +770,8 @@ static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
 			   int rw, struct printbuf *err)
 {
-	if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
-	    bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
+	if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+	    bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
 		prt_printf(err, "invalid pos");
 		return -EINVAL;
 	}
@@ -1028,7 +1028,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
 	for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
+		if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
 			break;
 
 		/*