Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-28 16:56:26 +00:00)
bcachefs: iter/update/trigger/str_hash flag cleanup
Combine iter/update/trigger/str_hash flags into a single enum, and
x-macroize them for a to_text() function later.

These flags are all for a specific iter/key/update context, so it makes
sense to group them together - iter/update/trigger flags were already
given distinct bits, this cleans up and unifies that handling.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in: parent bf5f6a689b, commit 5dd8c60e1e
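The mechanism behind the cleanup is the classic x-macro trick: a single flag list expands once into bit numbers, once into mask values, and once into a name table, which is what makes the to_text() function mentioned above nearly free to add later. A self-contained sketch of the pattern, using a shortened three-flag list; btree_iter_flags_to_text() here is a hypothetical stand-in for the printer this commit is preparing for, not a function from the tree:

#include <stdio.h>

/* One list, in the style this commit introduces: */
#define BTREE_ITER_FLAGS()  \
    x(slots)                \
    x(intent)               \
    x(prefetch)

/* Expansion 1: each x() entry becomes a bit number. */
enum {
#define x(n) BTREE_ITER_FLAG_BIT_##n,
    BTREE_ITER_FLAGS()
#undef x
};

/* Expansion 2: each entry becomes a mask in one shared enum. */
enum btree_iter_update_trigger_flags {
#define x(n) BTREE_ITER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
    BTREE_ITER_FLAGS()
#undef x
};

/* Expansion 3: the same list becomes a name table for printing. */
static const char * const btree_iter_flag_strs[] = {
#define x(n) #n,
    BTREE_ITER_FLAGS()
#undef x
    NULL
};

static void btree_iter_flags_to_text(unsigned flags)
{
    for (unsigned i = 0; btree_iter_flag_strs[i]; i++)
        if (flags & (1U << i))
            printf("%s ", btree_iter_flag_strs[i]);
    printf("\n");
}

int main(void)
{
    /* Prints "slots prefetch": the names stay in sync with the bits
     * automatically, because all three expansions share one list. */
    btree_iter_flags_to_text(BTREE_ITER_slots|BTREE_ITER_prefetch);
    return 0;
}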
@@ -360,7 +360,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,

     ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
         bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
-               BTREE_ITER_INTENT);
+               BTREE_ITER_intent);
     if (ret)
         goto btree_err;

@@ -411,7 +411,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
     struct posix_acl *acl = NULL;

     struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
-               &hash_info, inum, &search, BTREE_ITER_INTENT);
+               &hash_info, inum, &search, BTREE_ITER_intent);
     int ret = bkey_err(k);
     if (ret)
         return bch2_err_matches(ret, ENOENT) ? 0 : ret;
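One idiom worth knowing before the longer hunks: the ret = a() ?: b() chains above and below use the GNU C conditional with omitted middle operand, where a ?: b yields a if a is nonzero (an error, in the 0-on-success convention) and otherwise evaluates b. A tiny standalone illustration with hypothetical step functions, not code from the tree:

/* GNU C extension: a ?: b is equivalent to a ? a : b, with a evaluated
 * only once. With 0-on-success/negative-errno returns, it chains
 * fallible steps and short-circuits on the first failure. */
static int step_one(void) { return 0; }     /* hypothetical: succeeds */
static int step_two(void) { return -22; }   /* hypothetical: fails */

static int do_steps(void)
{
    /* step_two() runs only because step_one() returned 0; the result
     * here is -22, the first (and only) failure. */
    return step_one() ?: step_two();
}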
@@ -437,9 +437,9 @@ bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter
     int ret;

     k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
-               BTREE_ITER_WITH_UPDATES|
-               BTREE_ITER_CACHED|
-               BTREE_ITER_INTENT);
+               BTREE_ITER_with_updates|
+               BTREE_ITER_cached|
+               BTREE_ITER_intent);
     ret = bkey_err(k);
     if (unlikely(ret))
         return ERR_PTR(ret);
@@ -510,7 +510,7 @@ int bch2_bucket_gens_init(struct bch_fs *c)
     int ret;

     ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-               BTREE_ITER_PREFETCH, k, ({
+               BTREE_ITER_prefetch, k, ({
         /*
          * Not a fsck error because this is checked/repaired by
          * bch2_check_alloc_key() which runs later:
@@ -563,7 +563,7 @@ int bch2_alloc_read(struct bch_fs *c)

     if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
         ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
-                   BTREE_ITER_PREFETCH, k, ({
+                   BTREE_ITER_prefetch, k, ({
             u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
             u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

@@ -589,7 +589,7 @@ int bch2_alloc_read(struct bch_fs *c)
         }));
     } else {
         ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-                   BTREE_ITER_PREFETCH, k, ({
+                   BTREE_ITER_prefetch, k, ({
             /*
              * Not a fsck error because this is checked/repaired by
              * bch2_check_alloc_key() which runs later:
@@ -657,7 +657,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,

     old = bch2_bkey_get_iter(trans, &iter, btree,
                bkey_start_pos(&k->k),
-               BTREE_ITER_INTENT);
+               BTREE_ITER_intent);
     ret = bkey_err(old);
     if (ret)
         return ret;
@@ -701,8 +701,8 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
         return ret;

     k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
-               BTREE_ITER_INTENT|
-               BTREE_ITER_WITH_UPDATES);
+               BTREE_ITER_intent|
+               BTREE_ITER_with_updates);
     ret = bkey_err(k);
     if (ret)
         return ret;
@@ -738,7 +738,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
     struct bch_alloc_v4 old_a_convert;
     const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

-    if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+    if (flags & BTREE_TRIGGER_transactional) {
         struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

         new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
@@ -802,7 +802,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
          * not:
          */

-        if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+        if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
             old_a->cached_sectors) {
             ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
                        -((s64) old_a->cached_sectors));
@@ -811,12 +811,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
         }
     }

-    if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+    if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
         struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
         u64 journal_seq = trans->journal_res.seq;
         u64 bucket_journal_seq = new_a->journal_seq;

-        if ((flags & BTREE_TRIGGER_INSERT) &&
+        if ((flags & BTREE_TRIGGER_insert) &&
             data_type_is_empty(old_a->data_type) !=
             data_type_is_empty(new_a->data_type) &&
             new.k->type == KEY_TYPE_alloc_v4) {
@@ -877,8 +877,8 @@ int bch2_trigger_alloc(struct btree_trans *trans,
             bch2_do_gc_gens(c);
     }

-    if ((flags & BTREE_TRIGGER_GC) &&
-        (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+    if ((flags & BTREE_TRIGGER_gc) &&
+        (flags & BTREE_TRIGGER_bucket_invalidate)) {
         struct bch_alloc_v4 new_a_convert;
         const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

@@ -903,7 +903,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 }

 /*
- * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
  * extents style btrees, but works on non-extents btrees:
  */
 static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
@@ -1401,13 +1401,13 @@ int bch2_check_alloc_info(struct bch_fs *c)
     int ret = 0;

     bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);
     bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);
     bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);
     bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);

     while (1) {
         struct bpos next;
@@ -1469,13 +1469,13 @@ int bch2_check_alloc_info(struct bch_fs *c)

     ret = for_each_btree_key(trans, iter,
             BTREE_ID_need_discard, POS_MIN,
-            BTREE_ITER_PREFETCH, k,
+            BTREE_ITER_prefetch, k,
         bch2_check_discard_freespace_key(trans, &iter));
     if (ret)
         goto err;

     bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);
     while (1) {
         bch2_trans_begin(trans);
         k = bch2_btree_iter_peek(&iter);
@@ -1505,7 +1505,7 @@ int bch2_check_alloc_info(struct bch_fs *c)

     ret = for_each_btree_key_commit(trans, iter,
             BTREE_ID_bucket_gens, POS_MIN,
-            BTREE_ITER_PREFETCH, k,
+            BTREE_ITER_prefetch, k,
             NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
         bch2_check_bucket_gens_key(trans, &iter, k));
 err:
@@ -1552,7 +1552,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,

     a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
     ret = bch2_trans_update(trans, alloc_iter,
-               &a_mut->k_i, BTREE_TRIGGER_NORUN);
+               &a_mut->k_i, BTREE_TRIGGER_norun);
     if (ret)
         goto err;

@@ -1591,7 +1591,7 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
     int ret = bch2_trans_run(c,
         for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-                POS_MIN, BTREE_ITER_PREFETCH, k,
+                POS_MIN, BTREE_ITER_prefetch, k,
                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
             bch2_check_alloc_to_lru_ref(trans, &iter)));
     bch_err_fn(c, ret);
@@ -1693,7 +1693,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,

     k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
                need_discard_iter->pos,
-               BTREE_ITER_CACHED);
+               BTREE_ITER_cached);
     ret = bkey_err(k);
     if (ret)
         goto out;
@@ -1817,7 +1817,7 @@ void bch2_do_discards(struct bch_fs *c)
 static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
 {
     struct btree_iter iter;
-    bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_INTENT);
+    bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
     struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
     int ret = bkey_err(k);
     if (ret)
@@ -1952,7 +1952,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
     a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);

     ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
-               BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+               BTREE_TRIGGER_bucket_invalidate) ?:
         bch2_trans_commit(trans, NULL, NULL,
                BCH_WATERMARK_btree|
                BCH_TRANS_COMMIT_no_enospc);
@@ -2004,7 +2004,7 @@ static void bch2_do_invalidates_work(struct work_struct *work)
     ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
             lru_pos(ca->dev_idx, 0, 0),
             lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
-            BTREE_ITER_INTENT, k,
+            BTREE_ITER_intent, k,
         invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));

     if (ret < 0) {
@@ -2041,7 +2041,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,

     bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
-               BTREE_ITER_PREFETCH);
+               BTREE_ITER_prefetch);
     /*
      * Scan the alloc btree for every bucket on @ca, and add buckets to the
      * freespace/need_discard/need_gc_gens btrees as needed:
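The bch2_trigger_alloc() hunks above show the calling convention the renamed trigger flags describe: one trigger function is invoked in several distinct phases, and it dispatches on the phase bits. A stripped-down sketch of that dispatch shape, with stand-in flag values and empty bodies rather than the real function:

/* Sketch of the phase dispatch visible in bch2_trigger_alloc() above.
 * Flag values are placeholders; the real ones live in btree_types.h. */
enum example_trigger_flags {
    TRIGGER_transactional       = 1 << 0,
    TRIGGER_atomic              = 1 << 1,
    TRIGGER_gc                  = 1 << 2,
    TRIGGER_insert              = 1 << 3,
    TRIGGER_bucket_invalidate   = 1 << 4,
};

static int example_trigger(enum example_trigger_flags flags)
{
    if (flags & TRIGGER_transactional) {
        /* commit is being built: may generate further btree updates */
    }

    if ((flags & TRIGGER_atomic) && (flags & TRIGGER_insert)) {
        /* commit is happening: update in-memory state; errors here
         * are fatal, per the comment block later in this diff */
    }

    if ((flags & TRIGGER_gc) && (flags & TRIGGER_bucket_invalidate)) {
        /* gc/fsck recalculation path */
    }

    return 0;
}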
@@ -300,7 +300,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc

     k = bch2_bkey_get_iter(trans, &iter,
                BTREE_ID_alloc, POS(ca->dev_idx, b),
-               BTREE_ITER_CACHED);
+               BTREE_ITER_cached);
     ret = bkey_err(k);
     if (ret) {
         ob = ERR_PTR(ret);
@@ -344,7 +344,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc

     ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
                &bp_pos, &bp,
-               BTREE_ITER_NOPRESERVE);
+               BTREE_ITER_nopreserve);
     if (ret) {
         ob = ERR_PTR(ret);
         goto err;
@@ -404,7 +404,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
      */
 again:
     for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
-               BTREE_ITER_SLOTS, k, ret) {
+               BTREE_ITER_slots, k, ret) {
         struct bch_alloc_v4 a_convert;
         const struct bch_alloc_v4 *a;

@@ -420,7 +420,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
         continue;

     /* now check the cached key to serialize concurrent allocs of the bucket */
-    ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+    ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
     ret = bkey_err(ck);
     if (ret)
         break;
@@ -170,9 +170,9 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,

     k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
                bp_k->k.p,
-               BTREE_ITER_INTENT|
-               BTREE_ITER_SLOTS|
-               BTREE_ITER_WITH_UPDATES);
+               BTREE_ITER_intent|
+               BTREE_ITER_slots|
+               BTREE_ITER_with_updates);
     ret = bkey_err(k);
     if (ret)
         goto err;
@@ -212,7 +212,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,

     if (gen >= 0) {
         k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
-                   bucket, BTREE_ITER_CACHED|iter_flags);
+                   bucket, BTREE_ITER_cached|iter_flags);
         ret = bkey_err(k);
         if (ret)
             goto out;
@@ -759,7 +759,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,

     __for_each_btree_node(trans, iter, btree,
                btree == start.btree ? start.pos : POS_MIN,
-               0, depth, BTREE_ITER_PREFETCH, b, ret) {
+               0, depth, BTREE_ITER_prefetch, b, ret) {
         mem_may_pin -= btree_buf_bytes(b);
         if (mem_may_pin <= 0) {
             c->btree_cache.pinned_nodes_end = *end =
@@ -794,7 +794,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
     while (level >= depth) {
         struct btree_iter iter;
         bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level,
-                   BTREE_ITER_PREFETCH);
+                   BTREE_ITER_prefetch);

         ret = for_each_btree_key_continue(trans, iter, 0, k, ({
             check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
@@ -917,7 +917,7 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
     struct bpos last_flushed_pos = SPOS_MAX;

     return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
-               POS_MIN, BTREE_ITER_PREFETCH, k,
+               POS_MIN, BTREE_ITER_prefetch, k,
                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
         check_one_backpointer(trans, start, end,
                bkey_s_c_to_backpointer(k),
@@ -29,7 +29,8 @@ struct bkey_ops {
     bool (*key_normalize)(struct bch_fs *, struct bkey_s);
     bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
     int (*trigger)(struct btree_trans *, enum btree_id, unsigned,
-               struct bkey_s_c, struct bkey_s, unsigned);
+               struct bkey_s_c, struct bkey_s,
+               enum btree_iter_update_trigger_flags);
     void (*compat)(enum btree_id id, unsigned version,
                unsigned big_endian, int write,
                struct bkey_s);
@@ -76,56 +77,10 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b

 bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

-enum btree_update_flags {
-    __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
-    __BTREE_UPDATE_NOJOURNAL,
-    __BTREE_UPDATE_KEY_CACHE_RECLAIM,
-
-    __BTREE_TRIGGER_NORUN,
-    __BTREE_TRIGGER_TRANSACTIONAL,
-    __BTREE_TRIGGER_ATOMIC,
-    __BTREE_TRIGGER_GC,
-    __BTREE_TRIGGER_INSERT,
-    __BTREE_TRIGGER_OVERWRITE,
-    __BTREE_TRIGGER_BUCKET_INVALIDATE,
-};
-
-#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
-#define BTREE_UPDATE_NOJOURNAL (1U << __BTREE_UPDATE_NOJOURNAL)
-#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
-
-/* Don't run triggers at all */
-#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
-
-/*
- * If set, we're running transactional triggers as part of a transaction commit:
- * triggers may generate new updates
- *
- * If cleared, and either BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE are set,
- * we're running atomic triggers during a transaction commit: we have our
- * journal reservation, we're holding btree node write locks, and we know the
- * transaction is going to commit (returning an error here is a fatal error,
- * causing us to go emergency read-only)
- */
-#define BTREE_TRIGGER_TRANSACTIONAL (1U << __BTREE_TRIGGER_TRANSACTIONAL)
-#define BTREE_TRIGGER_ATOMIC (1U << __BTREE_TRIGGER_ATOMIC)
-
-/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
-#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
-
-/* @new is entering the btree */
-#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
-
-/* @old is leaving the btree */
-#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
-
-/* signal from bucket invalidate path to alloc trigger */
-#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-
 static inline int bch2_key_trigger(struct btree_trans *trans,
                enum btree_id btree, unsigned level,
                struct bkey_s_c old, struct bkey_s new,
-               unsigned flags)
+               enum btree_iter_update_trigger_flags flags)
 {
     const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);

@@ -135,8 +90,9 @@ static inline int bch2_key_trigger(struct btree_trans *trans,
 }

 static inline int bch2_key_trigger_old(struct btree_trans *trans,
-               enum btree_id btree_id, unsigned level,
-               struct bkey_s_c old, unsigned flags)
+               enum btree_id btree_id, unsigned level,
+               struct bkey_s_c old,
+               enum btree_iter_update_trigger_flags flags)
 {
     struct bkey_i deleted;

@@ -144,12 +100,13 @@ static inline int bch2_key_trigger_old(struct btree_trans *trans,
     deleted.k.p = old.k->p;

     return bch2_key_trigger(trans, btree_id, level, old, bkey_i_to_s(&deleted),
-               BTREE_TRIGGER_OVERWRITE|flags);
+               BTREE_TRIGGER_overwrite|flags);
 }

 static inline int bch2_key_trigger_new(struct btree_trans *trans,
-               enum btree_id btree_id, unsigned level,
-               struct bkey_s new, unsigned flags)
+               enum btree_id btree_id, unsigned level,
+               struct bkey_s new,
+               enum btree_iter_update_trigger_flags flags)
 {
     struct bkey_i deleted;

@@ -157,7 +114,7 @@ static inline int bch2_key_trigger_new(struct btree_trans *trans,
     deleted.k.p = new.k->p;

     return bch2_key_trigger(trans, btree_id, level, bkey_i_to_s_c(&deleted), new,
-               BTREE_TRIGGER_INSERT|flags);
+               BTREE_TRIGGER_insert|flags);
 }

 void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
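Beyond the renames, these hunks retype the trigger flags parameter from bare unsigned to enum btree_iter_update_trigger_flags. In C that is self-documentation more than enforcement, but it names the flag namespace right in the signature. A minimal illustration with hypothetical declarations:

enum example_trigger_flags { EXAMPLE_norun = 1 << 0 };  /* hypothetical */

/* Before: nothing in the type says which flag family is expected. */
int trigger_untyped(unsigned flags);

/* After: the enum names the namespace; readers and some static
 * analyzers can see it, though C still accepts any integer here. */
int trigger_typed(enum example_trigger_flags flags);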
@@ -856,7 +856,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,

     ret = commit_do(trans, NULL, NULL, 0,
             bch2_key_trigger(trans, btree_id, level, old,
-                    unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
+                    unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_gc));
 fsck_err:
 err:
     printbuf_exit(&buf);
@@ -900,7 +900,7 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
     gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

     __for_each_btree_node(trans, iter, btree_id, POS_MIN,
-                0, target_depth, BTREE_ITER_PREFETCH, b, ret) {
+                0, target_depth, BTREE_ITER_prefetch, b, ret) {
         bch2_verify_btree_nr_keys(b);

         gc_pos_set(c, gc_pos_btree_node(b));
@@ -1045,7 +1045,7 @@ static int bch2_mark_superblocks(struct bch_fs *c)
     mutex_lock(&c->sb_lock);
     gc_pos_set(c, gc_phase(GC_PHASE_SB));

-    int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_GC);
+    int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
     mutex_unlock(&c->sb_lock);
     return ret;
 }
@@ -1304,7 +1304,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
     if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
         a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

-    ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
+    ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
 fsck_err:
     return ret;
 }
@@ -1318,7 +1318,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c)
     for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
                POS(ca->dev_idx, ca->mi.first_bucket),
                POS(ca->dev_idx, ca->mi.nbuckets - 1),
-               BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+               BTREE_ITER_slots|BTREE_ITER_prefetch, k,
                NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
            bch2_alloc_write_key(trans, &iter, k)));
     if (ret) {
@@ -1350,7 +1350,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c)

     int ret = bch2_trans_run(c,
         for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-                   BTREE_ITER_PREFETCH, k, ({
+                   BTREE_ITER_prefetch, k, ({
             struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
             struct bucket *g = gc_bucket(ca, k.k->p.offset);

@@ -1435,7 +1435,7 @@ static int bch2_gc_reflink_done(struct bch_fs *c)
     int ret = bch2_trans_run(c,
         for_each_btree_key_commit(trans, iter,
                BTREE_ID_reflink, POS_MIN,
-               BTREE_ITER_PREFETCH, k,
+               BTREE_ITER_prefetch, k,
                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
            bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
     c->reflink_gc_nr = 0;
@@ -1448,7 +1448,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c)

     int ret = bch2_trans_run(c,
         for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
-                   BTREE_ITER_PREFETCH, k, ({
+                   BTREE_ITER_prefetch, k, ({
             const __le64 *refcount = bkey_refcount_c(k);

             if (!refcount)
@@ -1538,7 +1538,7 @@ static int bch2_gc_stripes_done(struct bch_fs *c)
     return bch2_trans_run(c,
         for_each_btree_key_commit(trans, iter,
                BTREE_ID_stripes, POS_MIN,
-               BTREE_ITER_PREFETCH, k,
+               BTREE_ITER_prefetch, k,
                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
            bch2_gc_write_stripes_key(trans, &iter, k)));
 }
@@ -1762,7 +1762,7 @@ int bch2_gc_gens(struct bch_fs *c)
     ret = bch2_trans_run(c,
         for_each_btree_key_commit(trans, iter, i,
                POS_MIN,
-               BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+               BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
                k,
                NULL, NULL,
                BCH_TRANS_COMMIT_no_enospc,
@@ -1774,7 +1774,7 @@ int bch2_gc_gens(struct bch_fs *c)
     ret = bch2_trans_run(c,
         for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
                POS_MIN,
-               BTREE_ITER_PREFETCH,
+               BTREE_ITER_prefetch,
                k,
                NULL, NULL,
                BCH_TRANS_COMMIT_no_enospc,
@@ -61,7 +61,7 @@ static inline int btree_path_cmp(const struct btree_path *l,
 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
 {
     /* Are we iterating over keys in all snapshots? */
-    if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+    if (iter->flags & BTREE_ITER_all_snapshots) {
         p = bpos_successor(p);
     } else {
         p = bpos_nosnap_successor(p);
@@ -74,7 +74,7 @@ static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
 {
     /* Are we iterating over keys in all snapshots? */
-    if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+    if (iter->flags & BTREE_ITER_all_snapshots) {
         p = bpos_predecessor(p);
     } else {
         p = bpos_nosnap_predecessor(p);
@@ -88,7 +88,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 {
     struct bpos pos = iter->pos;

-    if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+    if ((iter->flags & BTREE_ITER_is_extents) &&
         !bkey_eq(pos, POS_MAX))
         pos = bkey_successor(iter, pos);
     return pos;
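These hunks choose between two successor functions depending on whether the iterator steps through the snapshot field. A simplified illustration of that distinction, with a toy position type and carry logic that only approximates the real bpos helpers:

#include <stdint.h>

struct toy_pos { uint64_t inode, offset; uint32_t snapshot; }; /* toy bpos */

/* Successor including the snapshot dimension: increment with carry,
 * least significant field (snapshot) first. */
static struct toy_pos toy_successor(struct toy_pos p)
{
    if (++p.snapshot)
        return p;
    if (++p.offset)
        return p;
    p.inode++;
    return p;
}

/* Successor ignoring snapshots: step the offset, resetting snapshot. */
static struct toy_pos toy_nosnap_successor(struct toy_pos p)
{
    p.snapshot = 0;
    if (++p.offset)
        return p;
    p.inode++;
    return p;
}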
@@ -253,13 +253,13 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)

     BUG_ON(iter->btree_id >= BTREE_ID_NR);

-    BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);
+    BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

-    BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
-           (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+    BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
+           (iter->flags & BTREE_ITER_all_snapshots));

-    BUG_ON(!(iter->flags & BTREE_ITER_SNAPSHOT_FIELD) &&
-           (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+    BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
+           (iter->flags & BTREE_ITER_all_snapshots) &&
            !btree_type_has_snapshot_field(iter->btree_id));

     if (iter->update_path)
@@ -269,10 +269,10 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)

 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
 {
-    BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+    BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
            !iter->pos.snapshot);

-    BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+    BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
            iter->pos.snapshot != iter->snapshot);

     BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
@@ -289,7 +289,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
     if (!bch2_debug_check_iterators)
         return 0;

-    if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
+    if (!(iter->flags & BTREE_ITER_filter_snapshots))
         return 0;

     if (bkey_err(k) || !k.k)
@@ -300,8 +300,8 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
               k.k->p.snapshot));

     bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
-               BTREE_ITER_NOPRESERVE|
-               BTREE_ITER_ALL_SNAPSHOTS);
+               BTREE_ITER_nopreserve|
+               BTREE_ITER_all_snapshots);
     prev = bch2_btree_iter_prev(&copy);
     if (!prev.k)
         goto out;
@@ -897,7 +897,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,

     bch2_bkey_buf_reassemble(out, c, k);

-    if ((flags & BTREE_ITER_PREFETCH) &&
+    if ((flags & BTREE_ITER_prefetch) &&
         c->opts.btree_node_prefetch)
         ret = btree_path_prefetch_j(trans, path, &jiter);

@@ -944,7 +944,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,

     bch2_bkey_buf_unpack(&tmp, c, l->b, k);

-    if ((flags & BTREE_ITER_PREFETCH) &&
+    if ((flags & BTREE_ITER_prefetch) &&
         c->opts.btree_node_prefetch) {
         ret = btree_path_prefetch(trans, path);
         if (ret)
@@ -1659,8 +1659,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
                unsigned flags, unsigned long ip)
 {
     struct btree_path *path;
-    bool cached = flags & BTREE_ITER_CACHED;
-    bool intent = flags & BTREE_ITER_INTENT;
+    bool cached = flags & BTREE_ITER_cached;
+    bool intent = flags & BTREE_ITER_intent;
     struct trans_for_each_path_inorder_iter iter;
     btree_path_idx_t path_pos = 0, path_idx;

@@ -1708,7 +1708,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
         trans->paths_sorted = false;
     }

-    if (!(flags & BTREE_ITER_NOPRESERVE))
+    if (!(flags & BTREE_ITER_nopreserve))
         path->preserve = true;

     if (path->intent_ref)
@@ -1786,7 +1786,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)

     iter->path = bch2_btree_path_set_pos(trans, iter->path,
                btree_iter_search_key(iter),
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
@@ -1825,7 +1825,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
     iter->k.p = iter->pos = b->key.k.p;

     iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));
     btree_path_set_should_be_locked(btree_iter_path(trans, iter));
 out:
@@ -1892,7 +1892,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
      */
     iter->path = bch2_btree_path_set_pos(trans, iter->path,
                bpos_successor(iter->pos),
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     path = btree_iter_path(trans, iter);
@@ -1910,7 +1910,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
     iter->k.p = iter->pos = b->key.k.p;

     iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));
     btree_path_set_should_be_locked(btree_iter_path(trans, iter));
     EBUG_ON(btree_iter_path(trans, iter)->uptodate);
@@ -1929,11 +1929,11 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
     struct bpos pos = iter->k.p;
-    bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+    bool ret = !(iter->flags & BTREE_ITER_all_snapshots
              ? bpos_eq(pos, SPOS_MAX)
              : bkey_eq(pos, SPOS_MAX));

-    if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+    if (ret && !(iter->flags & BTREE_ITER_is_extents))
         pos = bkey_successor(iter, pos);
     bch2_btree_iter_set_pos(iter, pos);
     return ret;
@@ -1942,11 +1942,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
     struct bpos pos = bkey_start_pos(&iter->k);
-    bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+    bool ret = !(iter->flags & BTREE_ITER_all_snapshots
              ? bpos_eq(pos, POS_MIN)
              : bkey_eq(pos, POS_MIN));

-    if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+    if (ret && !(iter->flags & BTREE_ITER_is_extents))
         pos = bkey_predecessor(iter, pos);
     bch2_btree_iter_set_pos(iter, pos);
     return ret;
@@ -2057,7 +2057,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
     struct bkey_s_c k;
     int ret;

-    if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
+    if ((iter->flags & BTREE_ITER_key_cache_fill) &&
         bpos_eq(iter->pos, pos))
         return bkey_s_c_null;

@@ -2066,17 +2066,17 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos

     if (!iter->key_cache_path)
         iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
-                   iter->flags & BTREE_ITER_INTENT, 0,
-                   iter->flags|BTREE_ITER_CACHED|
-                   BTREE_ITER_CACHED_NOFILL,
+                   iter->flags & BTREE_ITER_intent, 0,
+                   iter->flags|BTREE_ITER_cached|
+                   BTREE_ITER_cached_nofill,
                    _THIS_IP_);

     iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
-               iter->flags|BTREE_ITER_CACHED) ?:
+               iter->flags|BTREE_ITER_cached) ?:
         bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
     if (unlikely(ret))
         return bkey_s_c_err(ret);
@@ -2104,7 +2104,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
     struct btree_path_level *l;

     iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2129,7 +2129,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp

     k = btree_path_level_peek_all(trans->c, l, &iter->k);

-    if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+    if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
         k.k &&
         (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
         k = k2;
@@ -2140,10 +2140,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
         }
     }

-    if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
+    if (unlikely(iter->flags & BTREE_ITER_with_journal))
         k = btree_trans_peek_journal(trans, iter, k);

-    if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+    if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
         trans->nr_updates))
         bch2_btree_trans_peek_updates(trans, iter, &k);

@@ -2195,11 +2195,11 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
     struct bpos iter_pos;
     int ret;

-    EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
+    EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));

     if (iter->update_path) {
         bch2_path_put_nokeep(trans, iter->update_path,
-                   iter->flags & BTREE_ITER_INTENT);
+                   iter->flags & BTREE_ITER_intent);
         iter->update_path = 0;
     }

@@ -2222,7 +2222,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
          * isn't monotonically increasing before FILTER_SNAPSHOTS, and
          * that's what we check against in extents mode:
          */
-        if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+        if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
                  ? bkey_gt(k.k->p, end)
                  : k.k->p.inode > end.inode))
             goto end;
@@ -2230,13 +2230,13 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
         if (iter->update_path &&
             !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
             bch2_path_put_nokeep(trans, iter->update_path,
-                    iter->flags & BTREE_ITER_INTENT);
+                    iter->flags & BTREE_ITER_intent);
             iter->update_path = 0;
         }

-        if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
-            (iter->flags & BTREE_ITER_INTENT) &&
-            !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
+        if ((iter->flags & BTREE_ITER_filter_snapshots) &&
+            (iter->flags & BTREE_ITER_intent) &&
+            !(iter->flags & BTREE_ITER_is_extents) &&
             !iter->update_path) {
             struct bpos pos = k.k->p;

@@ -2251,12 +2251,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
              * advance, same as on exit for iter->path, but only up
              * to snapshot
              */
-            __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
+            __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
             iter->update_path = iter->path;

             iter->update_path = bch2_btree_path_set_pos(trans,
                         iter->update_path, pos,
-                        iter->flags & BTREE_ITER_INTENT,
+                        iter->flags & BTREE_ITER_intent,
                         _THIS_IP_);
             ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
             if (unlikely(ret)) {
@@ -2269,7 +2269,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
          * We can never have a key in a leaf node at POS_MAX, so
          * we don't have to check these successor() calls:
          */
-        if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+        if ((iter->flags & BTREE_ITER_filter_snapshots) &&
             !bch2_snapshot_is_ancestor(trans->c,
                        iter->snapshot,
                        k.k->p.snapshot)) {
@@ -2278,7 +2278,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
         }

         if (bkey_whiteout(k.k) &&
-            !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+            !(iter->flags & BTREE_ITER_all_snapshots)) {
             search_key = bkey_successor(iter, k.k->p);
             continue;
         }
@@ -2288,12 +2288,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
          * equal to the key we just returned - except extents can
          * straddle iter->pos:
          */
-        if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+        if (!(iter->flags & BTREE_ITER_is_extents))
             iter_pos = k.k->p;
         else
             iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

-        if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+        if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
                  ? bkey_gt(iter_pos, end)
                  : bkey_ge(iter_pos, end)))
             goto end;
@@ -2304,7 +2304,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
     iter->pos = iter_pos;

     iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     btree_path_set_should_be_locked(btree_iter_path(trans, iter));
@@ -2317,7 +2317,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
         btree_path_set_should_be_locked(trans->paths + iter->update_path);
     }

-    if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+    if (!(iter->flags & BTREE_ITER_all_snapshots))
         iter->pos.snapshot = iter->snapshot;

     ret = bch2_btree_iter_verify_ret(iter, k);
@@ -2370,18 +2370,18 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
     EBUG_ON(btree_iter_path(trans, iter)->cached ||
             btree_iter_path(trans, iter)->level);

-    if (iter->flags & BTREE_ITER_WITH_JOURNAL)
+    if (iter->flags & BTREE_ITER_with_journal)
         return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);

     bch2_btree_iter_verify(iter);
     bch2_btree_iter_verify_entry_exit(iter);

-    if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+    if (iter->flags & BTREE_ITER_filter_snapshots)
         search_key.snapshot = U32_MAX;

     while (1) {
         iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-                   iter->flags & BTREE_ITER_INTENT,
+                   iter->flags & BTREE_ITER_intent,
                    btree_iter_ip_allocated(iter));

         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2396,17 +2396,17 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)

         k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
         if (!k.k ||
-            ((iter->flags & BTREE_ITER_IS_EXTENTS)
+            ((iter->flags & BTREE_ITER_is_extents)
              ? bpos_ge(bkey_start_pos(k.k), search_key)
              : bpos_gt(k.k->p, search_key)))
             k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);

-        if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+        if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
             trans->nr_updates))
             bch2_btree_trans_peek_prev_updates(trans, iter, &k);

         if (likely(k.k)) {
-            if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
+            if (iter->flags & BTREE_ITER_filter_snapshots) {
                 if (k.k->p.snapshot == iter->snapshot)
                     goto got_key;

@@ -2417,7 +2417,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
                  */
                 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
                     bch2_path_put_nokeep(trans, iter->path,
-                            iter->flags & BTREE_ITER_INTENT);
+                            iter->flags & BTREE_ITER_intent);
                     iter->path = saved_path;
                     saved_path = 0;
                     iter->k = saved_k;
@@ -2430,9 +2430,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
                            k.k->p.snapshot)) {
                     if (saved_path)
                         bch2_path_put_nokeep(trans, saved_path,
-                                iter->flags & BTREE_ITER_INTENT);
+                                iter->flags & BTREE_ITER_intent);
                     saved_path = btree_path_clone(trans, iter->path,
-                                iter->flags & BTREE_ITER_INTENT);
+                                iter->flags & BTREE_ITER_intent);
                     path = btree_iter_path(trans, iter);
                     saved_k = *k.k;
                     saved_v = k.v;
@@ -2443,9 +2443,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
             }
 got_key:
             if (bkey_whiteout(k.k) &&
-                !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+                !(iter->flags & BTREE_ITER_all_snapshots)) {
                 search_key = bkey_predecessor(iter, k.k->p);
-                if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+                if (iter->flags & BTREE_ITER_filter_snapshots)
                     search_key.snapshot = U32_MAX;
                 continue;
             }
@@ -2469,11 +2469,11 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
     if (bkey_lt(k.k->p, iter->pos))
         iter->pos = k.k->p;

-    if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+    if (iter->flags & BTREE_ITER_filter_snapshots)
         iter->pos.snapshot = iter->snapshot;
 out_no_locked:
     if (saved_path)
-        bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
+        bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);

     bch2_btree_iter_verify_entry_exit(iter);
     bch2_btree_iter_verify(iter);
@@ -2505,10 +2505,10 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)

     bch2_btree_iter_verify(iter);
     bch2_btree_iter_verify_entry_exit(iter);
-    EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
+    EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));

     /* extents can't span inode numbers: */
-    if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+    if ((iter->flags & BTREE_ITER_is_extents) &&
         unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
         if (iter->pos.inode == KEY_INODE_MAX)
             return bkey_s_c_null;
@@ -2518,7 +2518,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)

     search_key = btree_iter_search_key(iter);
     iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-               iter->flags & BTREE_ITER_INTENT,
+               iter->flags & BTREE_ITER_intent,
                btree_iter_ip_allocated(iter));

     ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2527,22 +2527,22 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
         goto out_no_locked;
     }

-    if ((iter->flags & BTREE_ITER_CACHED) ||
-        !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
+    if ((iter->flags & BTREE_ITER_cached) ||
+        !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
         k = bkey_s_c_null;

-        if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+        if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
             trans->nr_updates)) {
             bch2_btree_trans_peek_slot_updates(trans, iter, &k);
             if (k.k)
                 goto out;
         }

-        if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
+        if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
             (k = btree_trans_peek_slot_journal(trans, iter)).k)
             goto out;

-        if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+        if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
             (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
             if (!bkey_err(k))
                 iter->k = *k.k;
@@ -2557,12 +2557,12 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
         struct bpos next;
         struct bpos end = iter->pos;

-        if (iter->flags & BTREE_ITER_IS_EXTENTS)
+        if (iter->flags & BTREE_ITER_is_extents)
             end.offset = U64_MAX;

         EBUG_ON(btree_iter_path(trans, iter)->level);

-        if (iter->flags & BTREE_ITER_INTENT) {
+        if (iter->flags & BTREE_ITER_intent) {
             struct btree_iter iter2;

             bch2_trans_copy_iter(&iter2, iter);
@@ -2593,7 +2593,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
     bkey_init(&iter->k);
     iter->k.p = iter->pos;

-    if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+    if (iter->flags & BTREE_ITER_is_extents) {
         bch2_key_resize(&iter->k,
                min_t(u64, KEY_SIZE_MAX,
                (next.inode == iter->pos.inode
@@ -2777,13 +2777,13 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
 {
     if (iter->update_path)
         bch2_path_put_nokeep(trans, iter->update_path,
-                   iter->flags & BTREE_ITER_INTENT);
+                   iter->flags & BTREE_ITER_intent);
     if (iter->path)
         bch2_path_put(trans, iter->path,
-                   iter->flags & BTREE_ITER_INTENT);
+                   iter->flags & BTREE_ITER_intent);
     if (iter->key_cache_path)
         bch2_path_put(trans, iter->key_cache_path,
-                   iter->flags & BTREE_ITER_INTENT);
+                   iter->flags & BTREE_ITER_intent);
     iter->path = 0;
     iter->update_path = 0;
     iter->key_cache_path = 0;
@@ -2808,9 +2808,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
                unsigned depth,
                unsigned flags)
 {
-    flags |= BTREE_ITER_NOT_EXTENTS;
-    flags |= BTREE_ITER_SNAPSHOT_FIELD;
-    flags |= BTREE_ITER_ALL_SNAPSHOTS;
+    flags |= BTREE_ITER_not_extents;
+    flags |= BTREE_ITER_snapshot_field;
+    flags |= BTREE_ITER_all_snapshots;

     bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
                __bch2_btree_iter_flags(trans, btree_id, flags),
@@ -2833,9 +2833,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
     dst->ip_allocated = _RET_IP_;
 #endif
     if (src->path)
-        __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
+        __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
     if (src->update_path)
-        __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
+        __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
     dst->key_cache_path = 0;
 }
@@ -386,10 +386,10 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos

     if (unlikely(iter->update_path))
         bch2_path_put(trans, iter->update_path,
-                   iter->flags & BTREE_ITER_INTENT);
+                   iter->flags & BTREE_ITER_intent);
     iter->update_path = 0;

-    if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+    if (!(iter->flags & BTREE_ITER_all_snapshots))
         new_pos.snapshot = iter->snapshot;

     __bch2_btree_iter_set_pos(iter, new_pos);
@@ -397,7 +397,7 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos

 static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
 {
-    BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
+    BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
     iter->pos = bkey_start_pos(&iter->k);
 }
@@ -416,20 +416,20 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
                unsigned btree_id,
                unsigned flags)
 {
-    if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+    if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
         btree_id_is_extents(btree_id))
-        flags |= BTREE_ITER_IS_EXTENTS;
+        flags |= BTREE_ITER_is_extents;

-    if (!(flags & BTREE_ITER_SNAPSHOT_FIELD) &&
+    if (!(flags & BTREE_ITER_snapshot_field) &&
         !btree_type_has_snapshot_field(btree_id))
-        flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+        flags &= ~BTREE_ITER_all_snapshots;

-    if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+    if (!(flags & BTREE_ITER_all_snapshots) &&
         btree_type_has_snapshots(btree_id))
-        flags |= BTREE_ITER_FILTER_SNAPSHOTS;
+        flags |= BTREE_ITER_filter_snapshots;

     if (trans->journal_replay_not_finished)
-        flags |= BTREE_ITER_WITH_JOURNAL;
+        flags |= BTREE_ITER_with_journal;

     return flags;
 }
@@ -439,10 +439,10 @@ static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
                unsigned flags)
 {
     if (!btree_id_cached(trans->c, btree_id)) {
-        flags &= ~BTREE_ITER_CACHED;
-        flags &= ~BTREE_ITER_WITH_KEY_CACHE;
-    } else if (!(flags & BTREE_ITER_CACHED))
-        flags |= BTREE_ITER_WITH_KEY_CACHE;
+        flags &= ~BTREE_ITER_cached;
+        flags &= ~BTREE_ITER_with_key_cache;
+    } else if (!(flags & BTREE_ITER_cached))
+        flags |= BTREE_ITER_with_key_cache;

     return __bch2_btree_iter_flags(trans, btree_id, flags);
 }
@@ -619,14 +619,14 @@ u32 bch2_trans_begin(struct btree_trans *);
 static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
                unsigned flags)
 {
-    return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+    return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
             bch2_btree_iter_peek_prev(iter);
 }

 static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
                unsigned flags)
 {
-    return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+    return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
             bch2_btree_iter_peek(iter);
 }

@@ -634,7 +634,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
                struct bpos end,
                unsigned flags)
 {
-    if (!(flags & BTREE_ITER_SLOTS))
+    if (!(flags & BTREE_ITER_slots))
         return bch2_btree_iter_peek_upto(iter, end);

     if (bkey_gt(iter->pos, end))
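The __bch2_btree_iter_flags() hunks above carry the flag-inference rules that every iterator init goes through. A condensed restatement as standalone C, for reference only - simplified types and placeholder flag values, not the kernel code:

enum {
    ITER_slots              = 1 << 0,
    ITER_is_extents         = 1 << 1,
    ITER_not_extents        = 1 << 2,
    ITER_snapshot_field     = 1 << 3,
    ITER_all_snapshots      = 1 << 4,
    ITER_filter_snapshots   = 1 << 5,
    ITER_with_journal       = 1 << 6,
};

struct btree_desc {                 /* stand-in for per-btree properties */
    int is_extents;                 /* keys are ranges, not points */
    int has_snapshot_field;         /* bpos.snapshot is meaningful */
    int has_snapshots;              /* btree is snapshotted */
};

static unsigned infer_iter_flags(struct btree_desc b, unsigned flags,
                                 int journal_replay_not_finished)
{
    /* Extent btrees get extent semantics unless explicitly disabled: */
    if (!(flags & (ITER_all_snapshots|ITER_not_extents)) && b.is_extents)
        flags |= ITER_is_extents;

    /* all_snapshots is meaningless without a snapshot field: */
    if (!(flags & ITER_snapshot_field) && !b.has_snapshot_field)
        flags &= ~ITER_all_snapshots;

    /* otherwise, snapshotted btrees filter to the iterator's snapshot: */
    if (!(flags & ITER_all_snapshots) && b.has_snapshots)
        flags |= ITER_filter_snapshots;

    if (journal_replay_not_finished)
        flags |= ITER_with_journal;

    return flags;
}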
@@ -383,9 +383,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
     int ret;

     bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
-               BTREE_ITER_KEY_CACHE_FILL|
-               BTREE_ITER_CACHED_NOFILL);
-    iter.flags &= ~BTREE_ITER_WITH_JOURNAL;
+               BTREE_ITER_key_cache_fill|
+               BTREE_ITER_cached_nofill);
+    iter.flags &= ~BTREE_ITER_with_journal;
     k = bch2_btree_iter_peek_slot(&iter);
     ret = bkey_err(k);
     if (ret)
@@ -515,7 +515,7 @@ bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree
 fill:
     path->uptodate = BTREE_ITER_UPTODATE;

-    if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
+    if (!ck->valid && !(flags & BTREE_ITER_cached_nofill)) {
         /*
          * Using the underscore version because we haven't set
          * path->uptodate yet:
@@ -622,13 +622,13 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
     int ret;

     bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
-               BTREE_ITER_SLOTS|
-               BTREE_ITER_INTENT|
-               BTREE_ITER_ALL_SNAPSHOTS);
+               BTREE_ITER_slots|
+               BTREE_ITER_intent|
+               BTREE_ITER_all_snapshots);
     bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
-               BTREE_ITER_CACHED|
-               BTREE_ITER_INTENT);
-    b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+               BTREE_ITER_cached|
+               BTREE_ITER_intent);
+    b_iter.flags &= ~BTREE_ITER_with_key_cache;

     ret = bch2_btree_iter_traverse(&c_iter);
     if (ret)
@@ -666,9 +666,9 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,

     ret = bch2_btree_iter_traverse(&b_iter) ?:
         bch2_trans_update(trans, &b_iter, ck->k,
-               BTREE_UPDATE_KEY_CACHE_RECLAIM|
-               BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
-               BTREE_TRIGGER_NORUN) ?:
+               BTREE_UPDATE_key_cache_reclaim|
+               BTREE_UPDATE_internal_snapshot_node|
+               BTREE_TRIGGER_norun) ?:
         bch2_trans_commit(trans, NULL, NULL,
               BCH_TRANS_COMMIT_no_check_rw|
               BCH_TRANS_COMMIT_no_enospc|
@@ -790,7 +790,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
      * flushing. The flush callback will not proceed unless ->seq matches
      * the latest pin, so make sure it starts with a consistent value.
      */
-    if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
+    if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
         !journal_pin_active(&ck->journal)) {
         ck->seq = trans->journal_res.seq;
     }
@ -315,7 +315,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
|
||||
BUG_ON(i->btree_id != path->btree_id);
|
||||
EBUG_ON(!i->level &&
|
||||
btree_type_has_snapshots(i->btree_id) &&
|
||||
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
|
||||
!(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
|
||||
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
|
||||
i->k->k.p.snapshot &&
|
||||
bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
|
||||
@ -443,13 +443,13 @@ static int run_one_mem_trigger(struct btree_trans *trans,
|
||||
|
||||
verify_update_old_key(trans, i);
|
||||
|
||||
if (unlikely(flags & BTREE_TRIGGER_NORUN))
|
||||
	if (unlikely(flags & BTREE_TRIGGER_norun))
		return 0;

	if (old_ops->trigger == new_ops->trigger) {
		ret   = bch2_key_trigger(trans, i->btree_id, i->level,
				old, bkey_i_to_s(new),
				BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
	} else {
		ret   = bch2_key_trigger_new(trans, i->btree_id, i->level,
				bkey_i_to_s(new), flags) ?:

@ -472,11 +472,11 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
	struct bkey_s_c old = { &old_k, i->old_v };
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
	unsigned flags = i->flags|BTREE_TRIGGER_transactional;

	verify_update_old_key(trans, i);

	if ((i->flags & BTREE_TRIGGER_norun) ||
	    !(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
		return 0;

@ -486,8 +486,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
		i->overwrite_trigger_run = true;
		i->insert_trigger_run = true;
		return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
					BTREE_TRIGGER_insert|
					BTREE_TRIGGER_overwrite|flags) ?: 1;
	} else if (overwrite && !i->overwrite_trigger_run) {
		i->overwrite_trigger_run = true;
		return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;

@ -572,7 +572,7 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
		       (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
		       (!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif

@ -590,7 +590,7 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)

		if (btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)) &&
		    gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
			int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
			if (ret)
				return ret;
		}

@ -686,7 +686,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,

	trans_for_each_update(trans, i)
		if (BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS & (1U << i->bkey_type)) {
			ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
			if (ret)
				goto fatal_err;
		}

@ -705,7 +705,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_nojournal)
			continue;

		verify_update_old_key(trans, i);

@ -1063,7 +1063,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_nojournal)
			continue;

		/* we're going to journal the key being updated: */
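The hunks above all lean on one idiom: a single trigger entry point is called with the unified flags selecting the execution context (transactional, atomic, or gc). A self-contained sketch of that dispatch follows; the CTX_* values are local stand-ins, not the kernel's BTREE_TRIGGER_* definitions.

#include <stdio.h>

/* Stand-ins for the trigger context bits; illustrative only. */
enum ctx_flags {
	CTX_transactional	= 1 << 0,	/* may queue further btree updates */
	CTX_atomic		= 1 << 1,	/* commit point: errors are fatal */
	CTX_gc			= 1 << 2,	/* gc/fsck: recompute accounting */
};

static int run_trigger(unsigned flags)
{
	if (flags & CTX_transactional)
		printf("transactional: may generate new updates\n");
	if (flags & CTX_atomic)
		printf("atomic: applying in-memory accounting\n");
	if (flags & CTX_gc)
		printf("gc: recalculating disk usage\n");
	return 0;
}

int main(void)
{
	return run_trigger(CTX_transactional);
}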
@ -187,36 +187,87 @@ struct btree_node_iter {
	} data[MAX_BSETS];
};

#define BTREE_ITER_FLAGS()		\
	x(slots)			\
	x(intent)			\
	x(prefetch)			\
	x(is_extents)			\
	x(not_extents)			\
	x(cached)			\
	x(with_key_cache)		\
	x(with_updates)			\
	x(with_journal)			\
	x(snapshot_field)		\
	x(all_snapshots)		\
	x(filter_snapshots)		\
	x(nopreserve)			\
	x(cached_nofill)		\
	x(key_cache_fill)

#define STR_HASH_FLAGS()		\
	x(must_create)			\
	x(must_replace)

#define BTREE_UPDATE_FLAGS()		\
	x(internal_snapshot_node)	\
	x(nojournal)			\
	x(key_cache_reclaim)

/*
 * BTREE_TRIGGER_norun - don't run triggers at all
 *
 * BTREE_TRIGGER_transactional - we're running transactional triggers as part of
 * a transaction commit: triggers may generate new updates
 *
 * BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
 * commit: we have our journal reservation, we're holding btree node write
 * locks, and we know the transaction is going to commit (returning an error
 * here is a fatal error, causing us to go emergency read-only)
 *
 * BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
 *
 * BTREE_TRIGGER_insert - @new is entering the btree
 * BTREE_TRIGGER_overwrite - @old is leaving the btree
 *
 * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
 * trigger
 */
#define BTREE_TRIGGER_FLAGS()		\
	x(norun)			\
	x(transactional)		\
	x(atomic)			\
	x(gc)				\
	x(insert)			\
	x(overwrite)			\
	x(bucket_invalidate)

enum {
#define x(n) BTREE_ITER_FLAG_BIT_##n,
	BTREE_ITER_FLAGS()
	STR_HASH_FLAGS()
	BTREE_UPDATE_FLAGS()
	BTREE_TRIGGER_FLAGS()
#undef x
};

/* iter flags must fit in a u16: */
//BUILD_BUG_ON(BTREE_ITER_FLAG_BIT_key_cache_fill > 15);

enum btree_iter_update_trigger_flags {
#define x(n) BTREE_ITER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
	BTREE_ITER_FLAGS()
#undef x
#define x(n) STR_HASH_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
	STR_HASH_FLAGS()
#undef x
#define x(n) BTREE_UPDATE_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
	BTREE_UPDATE_FLAGS()
#undef x
#define x(n) BTREE_TRIGGER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
	BTREE_TRIGGER_FLAGS()
#undef x
};

enum btree_path_uptodate {
	BTREE_ITER_UPTODATE = 0,
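The x-macro lists above are expanded twice, once to assign contiguous bit numbers and once to produce the flag values, and the commit message notes they can be expanded again later for a to_text() function. A minimal standalone sketch of the same technique; the EXAMPLE_* names are illustrative, not kernel identifiers:

#include <stdio.h>

#define EXAMPLE_FLAGS()	\
	x(slots)	\
	x(intent)	\
	x(prefetch)

/* First expansion: contiguous bit numbers. */
enum {
#define x(n) EXAMPLE_FLAG_BIT_##n,
	EXAMPLE_FLAGS()
#undef x
};

/* Second expansion: the flag values themselves. */
enum example_flags {
#define x(n) EXAMPLE_##n = 1U << EXAMPLE_FLAG_BIT_##n,
	EXAMPLE_FLAGS()
#undef x
};

/* Third expansion: a string table for a to_text()-style printer. */
static const char * const example_flag_strs[] = {
#define x(n) #n,
	EXAMPLE_FLAGS()
#undef x
	NULL
};

int main(void)
{
	for (unsigned i = 0; example_flag_strs[i]; i++)
		printf("%-8s = 1 << %u\n", example_flag_strs[i], i);
	return 0;
}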
@ -307,7 +358,7 @@ struct btree_iter {
	 */
	struct bkey k;

	/* BTREE_ITER_with_journal: */
	size_t journal_idx;
#ifdef TRACK_PATH_ALLOCATED
	unsigned long ip_allocated;

@ -25,14 +25,14 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_iter_update_trigger_flags,
			  unsigned long ip);

static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;

@ -104,8 +104,8 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
			   BTREE_ITER_all_snapshots|
			   BTREE_ITER_nopreserve, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

@ -138,8 +138,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_not_extents|
			     BTREE_ITER_all_snapshots);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {

@ -151,8 +151,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_not_extents|
					   BTREE_ITER_intent);
		ret = bkey_err(new_k);
		if (ret)
			break;

@ -168,7 +168,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
			update->k.type = KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_internal_snapshot_node);
		}
		bch2_trans_iter_exit(trans, &new_iter);

@ -185,7 +185,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,

int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_iter_update_trigger_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{

@ -218,7 +218,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
						     old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

@ -235,7 +235,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
						     old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

@ -260,7 +260,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

@ -273,7 +273,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					BTREE_UPDATE_internal_snapshot_node|
					flags, _RET_IP_);
		if (ret)
			return ret;

@ -285,7 +285,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;

@ -293,9 +293,9 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_intent|
			     BTREE_ITER_with_updates|
			     BTREE_ITER_not_extents);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;

@ -346,7 +346,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,

static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_iter_update_trigger_flags flags,
					    unsigned long ip)
{
	struct bkey k;

@ -354,7 +354,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

@ -372,7 +372,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_norun;

	btree_path_set_should_be_locked(btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);

@ -383,7 +383,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,

static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;

@ -479,15 +479,15 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
	if (!iter->key_cache_path)
		iter->key_cache_path =
			bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
				      BTREE_ITER_intent|
				      BTREE_ITER_cached, _THIS_IP_);

	iter->key_cache_path =
		bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
					iter->flags & BTREE_ITER_intent,
					_THIS_IP_);

	ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
	if (unlikely(ret))
		return ret;

@ -505,17 +505,17 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
}

int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	int ret;

	if (iter->flags & BTREE_ITER_is_extents)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    (iter->flags & BTREE_ITER_filter_snapshots)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

@ -528,7 +528,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	struct btree_path *path = trans->paths + path_idx;
	if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {

@ -587,7 +587,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)

@ -621,15 +621,15 @@ void bch2_trans_commit_hook(struct btree_trans *trans,

int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_cached|
			     BTREE_ITER_not_extents|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);

@ -637,14 +637,14 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);

@ -698,8 +698,8 @@ int bch2_btree_delete(struct btree_trans *trans,
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);

@ -717,7 +717,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);

@ -745,7 +745,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_is_extents)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

@ -804,7 +804,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
	k->k.p = pos;

	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, 0);

@ -50,10 +50,10 @@ int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);

int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
				struct bkey_i *, enum btree_iter_update_trigger_flags);

int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
			    enum btree_iter_update_trigger_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		      struct disk_reservation *, int flags);

@ -94,14 +94,14 @@ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
}

int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
				       enum btree_iter_update_trigger_flags,
				       struct bkey_s_c, struct bkey_s_c);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
			     enum btree_id, struct bpos);

int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
				   struct bkey_i *, enum btree_iter_update_trigger_flags);

struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned);

@ -276,7 +276,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
		unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
			btree_id, pos, flags|BTREE_ITER_intent, type);
	struct bkey_i *ret = IS_ERR(k.k)
		? ERR_CAST(k.k)
		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);

@ -299,7 +299,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
		unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
			btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes);
	int ret;

	if (IS_ERR(mut))

@ -44,8 +44,8 @@ static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans,
			struct bpos pos)
{
	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_intent, _RET_IP_);
	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

	struct btree_path *path = trans->paths + path_idx;

@ -664,7 +664,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

		ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k),
					   BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}

@ -673,7 +673,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

		ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k),
					   BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}

@ -1997,7 +1997,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
		: bpos_successor(b->data->max_key);

	sib_path = bch2_path_get(trans, btree, sib_pos,
				 U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, sib_path, false);
	if (ret)
		goto err;

@ -2351,10 +2351,10 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
	if (!skip_triggers) {
		ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
					   bkey_i_to_s_c(&b->key),
					   BTREE_TRIGGER_transactional) ?:
			bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
					     bkey_i_to_s(new_key),
					     BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}

@ -2371,7 +2371,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
		bch2_trans_copy_iter(&iter2, iter);

		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
				iter2.flags & BTREE_ITER_intent,
				_THIS_IP_);

		struct btree_path *path2 = btree_iter_path(trans, &iter2);

@ -2383,7 +2383,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
		trans->paths_sorted = false;

		ret   = bch2_btree_iter_traverse(&iter2) ?:
			bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
		if (ret)
			goto err;
	} else {

@ -2491,7 +2491,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,

	bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
				  BTREE_MAX_DEPTH, b->c.level,
				  BTREE_ITER_intent);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

@ -122,7 +122,7 @@ static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
	trans->journal_res.seq = wb->journal_seq;

	return bch2_trans_update(trans, iter, &wb->k,
				 BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_check_rw|

@ -191,13 +191,13 @@ btree_write_buffered_insert(struct btree_trans *trans,
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_cached|BTREE_ITER_intent);

	trans->journal_res.seq = wb->journal_seq;

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

@ -332,7 +332,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
			if (!iter.path || iter.btree_id != k->btree) {
				bch2_trans_iter_exit(trans, &iter);
				bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
						     BTREE_ITER_intent|BTREE_ITER_all_snapshots);
			}

			bch2_btree_iter_set_pos(&iter, k->k.k.p);

@ -752,16 +752,17 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			const union bch_extent_entry *entry,
			s64 *sectors,
			enum btree_iter_update_trigger_flags flags)
{
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct bpos bucket;
	struct bch_backpointer bp;

	bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, entry, &bucket, &bp);
	*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
		int ret = PTR_ERR_OR_ZERO(a);

@ -784,7 +785,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
		}
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

@ -820,13 +821,14 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
			struct bkey_s_c k,
			struct extent_ptr_decoded p,
			enum bch_data_type data_type,
			s64 sectors,
			enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,

@ -856,10 +858,10 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		BUG_ON(!(flags & BTREE_TRIGGER_gc));

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {

@ -895,9 +897,10 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,

static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

@ -969,7 +972,7 @@ static int __trigger_extent(struct btree_trans *trans,
int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);

@ -983,7 +986,7 @@ int bch2_trigger_extent(struct btree_trans *trans,
		    new_ptrs_bytes))
		return 0;

	if (flags & BTREE_TRIGGER_transactional) {
		struct bch_fs *c = trans->c;
		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
			  (int) bch2_bkey_needs_rebalance(c, old);

@ -996,7 +999,7 @@ int bch2_trigger_extent(struct btree_trans *trans,
		}
	}

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc))
		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);

	return 0;

@ -1005,17 +1008,17 @@ int bch2_trigger_extent(struct btree_trans *trans,
/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size * replicas;

	if (flags & BTREE_TRIGGER_overwrite)
		sectors = -sectors;

	if (flags & BTREE_TRIGGER_transactional) {
		int ret = bch2_replicas_deltas_realloc(trans, 0);
		if (ret)
			return ret;

@ -1026,7 +1029,7 @@ static int __trigger_reservation(struct btree_trans *trans,
		d->persistent_reserved[replicas - 1] += sectors;
	}

	if (flags & BTREE_TRIGGER_gc) {
		percpu_down_read(&c->mark_lock);
		preempt_disable();

@ -1046,7 +1049,7 @@ static int __trigger_reservation(struct btree_trans *trans,
int bch2_trigger_reservation(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

@ -1092,8 +1095,8 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
}

static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			u64 b, enum bch_data_type data_type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bucket old, new, *g;
	int ret = 0;

@ -1134,9 +1137,9 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
			struct bch_dev *ca, u64 b,
			enum bch_data_type type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&

@ -1148,9 +1151,9 @@ int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else

@ -1158,11 +1161,9 @@ int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
			struct bch_dev *ca, u64 start, u64 end,
			enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
			enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);

@ -1186,8 +1187,8 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
	return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;

@ -1230,7 +1231,8 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
		__bch2_trans_mark_dev_sb(trans, ca, flags));

@ -1238,7 +1240,8 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca, unsigned flags)
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
			enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);

@ -1253,7 +1256,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c, unsigned flags)

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

/* Disk reservations: */

@ -338,18 +338,20 @@ int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
		s64, enum bch_data_type, u8, u8, u32);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({ \
	int ret = 0; \
\
	if (_old.k->type) \
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \
	if (!ret && _new.k->type) \
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret; \
})
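trigger_run_overwrite_then_insert() calls the same trigger twice: once for the old key with BTREE_TRIGGER_insert masked off, and once for the new key with BTREE_TRIGGER_overwrite masked off, so a single function can account both directions as a signed delta. A standalone sketch of that calling convention, with local flag values standing in for the kernel's:

#include <stdio.h>

enum trig_flags {
	TRIG_insert    = 1 << 0,
	TRIG_overwrite = 1 << 1,
};

/* One accounting function handles both directions: overwrite negates. */
static long usage_delta(long sectors, unsigned flags)
{
	return (flags & TRIG_overwrite) ? -sectors : sectors;
}

int main(void)
{
	unsigned flags = TRIG_insert|TRIG_overwrite;
	long delta = 0;

	/* Old key leaving the btree: mask off the insert bit. */
	delta += usage_delta(8, flags & ~TRIG_insert);
	/* New key entering the btree: mask off the overwrite bit. */
	delta += usage_delta(32, flags & ~TRIG_overwrite);

	printf("net change: %ld sectors\n", delta);	/* prints 24 */
	return 0;
}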
@ -359,9 +361,12 @@ void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list
|
||||
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
|
||||
|
||||
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
|
||||
enum bch_data_type, unsigned, unsigned);
|
||||
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *, unsigned);
|
||||
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *, unsigned);
|
||||
enum bch_data_type, unsigned,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
int bch2_trans_mark_dev_sbs(struct bch_fs *);
|
||||
|
||||
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
|
||||
|
@ -106,7 +106,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, m->btree_id,
|
||||
bkey_start_pos(&bch2_keylist_front(keys)->k),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
|
||||
BTREE_ITER_slots|BTREE_ITER_intent);
|
||||
|
||||
while (1) {
|
||||
struct bkey_s_c k;
|
||||
@ -288,7 +288,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
|
||||
k.k->p, insert->k.p) ?:
|
||||
bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
|
||||
bch2_trans_update(trans, &iter, insert,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
|
||||
BTREE_UPDATE_internal_snapshot_node) ?:
|
||||
bch2_trans_commit(trans, &op->res,
|
||||
NULL,
|
||||
BCH_TRANS_COMMIT_no_check_rw|
|
||||
@ -387,7 +387,7 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
|
||||
unsigned sectors = bio_sectors(bio);
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
|
||||
BTREE_ITER_SLOTS);
|
||||
BTREE_ITER_slots);
|
||||
ret = lockrestart_do(trans, ({
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
bkey_err(k);
|
||||
@ -480,15 +480,15 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
|
||||
|
||||
/*
|
||||
* Since we're not inserting through an extent iterator
|
||||
* (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
|
||||
* (BTREE_ITER_all_snapshots iterators aren't extent iterators),
|
||||
* we aren't using the extent overwrite path to delete, we're
|
||||
* just using the normal key deletion path:
|
||||
*/
|
||||
if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
|
||||
if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
|
||||
n->k.size = 0;
|
||||
|
||||
return bch2_trans_relock(trans) ?:
|
||||
bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
|
||||
bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
|
||||
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
|
||||
}
|
||||
|
||||
|
@ -375,8 +375,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
|
||||
return flush_buf(i) ?:
|
||||
bch2_trans_run(i->c,
|
||||
for_each_btree_key(trans, iter, i->id, i->from,
|
||||
BTREE_ITER_PREFETCH|
|
||||
BTREE_ITER_ALL_SNAPSHOTS, k, ({
|
||||
BTREE_ITER_prefetch|
|
||||
BTREE_ITER_all_snapshots, k, ({
|
||||
bch2_bkey_val_to_text(&i->buf, i->c, k);
|
||||
prt_newline(&i->buf);
|
||||
bch2_trans_unlock(trans);
|
||||
@ -459,8 +459,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
|
||||
return flush_buf(i) ?:
|
||||
bch2_trans_run(i->c,
|
||||
for_each_btree_key(trans, iter, i->id, i->from,
|
||||
BTREE_ITER_PREFETCH|
|
||||
BTREE_ITER_ALL_SNAPSHOTS, k, ({
|
||||
BTREE_ITER_prefetch|
|
||||
BTREE_ITER_all_snapshots, k, ({
|
||||
struct btree_path_level *l =
|
||||
&btree_iter_path(trans, &iter)->l[0];
|
||||
struct bkey_packed *_k =
|
||||
|
@ -205,7 +205,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
|
||||
const struct bch_hash_info *hash_info,
|
||||
u8 type, const struct qstr *name, u64 dst_inum,
|
||||
u64 *dir_offset,
|
||||
bch_str_hash_flags_t str_hash_flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
|
||||
struct bkey_i_dirent *dirent;
|
||||
@ -220,9 +220,8 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
|
||||
dirent->k.p.snapshot = snapshot;
|
||||
|
||||
ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
|
||||
dir_inum, snapshot,
|
||||
&dirent->k_i, str_hash_flags,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
dir_inum, snapshot, &dirent->k_i,
|
||||
flags|BTREE_UPDATE_internal_snapshot_node);
|
||||
*dir_offset = dirent->k.p.offset;
|
||||
|
||||
return ret;
|
||||
@ -232,7 +231,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
|
||||
const struct bch_hash_info *hash_info,
|
||||
u8 type, const struct qstr *name, u64 dst_inum,
|
||||
u64 *dir_offset,
|
||||
bch_str_hash_flags_t str_hash_flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
struct bkey_i_dirent *dirent;
|
||||
int ret;
|
||||
@ -243,7 +242,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
|
||||
return ret;
|
||||
|
||||
ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
|
||||
dir, &dirent->k_i, str_hash_flags);
|
||||
dir, &dirent->k_i, flags);
|
||||
*dir_offset = dirent->k.p.offset;
|
||||
|
||||
return ret;
|
||||
@ -272,7 +271,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
|
||||
} else {
|
||||
target->subvol = le32_to_cpu(d.v->d_child_subvol);
|
||||
|
||||
ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_CACHED, &s);
|
||||
ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s);
|
||||
|
||||
target->inum = le64_to_cpu(s.inode);
|
||||
}
|
||||
@ -303,7 +302,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
|
||||
/* Lookup src: */
|
||||
old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
|
||||
src_hash, src_dir, src_name,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
ret = bkey_err(old_src);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -327,7 +326,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
|
||||
} else {
|
||||
old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
|
||||
dst_hash, dst_dir, dst_name,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
ret = bkey_err(old_dst);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -442,7 +441,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
|
||||
if (delete_src) {
|
||||
bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
|
||||
ret = bch2_btree_iter_traverse(&src_iter) ?:
|
||||
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
@ -450,7 +449,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
|
||||
if (delete_dst) {
|
||||
bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
|
||||
ret = bch2_btree_iter_traverse(&dst_iter) ?:
|
||||
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
@ -38,11 +38,11 @@ int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
|
||||
int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
|
||||
const struct bch_hash_info *, u8,
|
||||
const struct qstr *, u64, u64 *,
|
||||
bch_str_hash_flags_t);
|
||||
enum btree_iter_update_trigger_flags);
|
||||
int bch2_dirent_create(struct btree_trans *, subvol_inum,
|
||||
const struct bch_hash_info *, u8,
|
||||
const struct qstr *, u64, u64 *,
|
||||
bch_str_hash_flags_t);
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
static inline unsigned vfs_d_type(unsigned type)
|
||||
{
|
||||
|
@ -244,7 +244,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
|
||||
static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
struct bkey_s_c k,
|
||||
unsigned ptr_idx,
|
||||
unsigned flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
|
||||
@ -258,7 +258,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(!(flags & BTREE_TRIGGER_GC));
|
||||
BUG_ON(!(flags & BTREE_TRIGGER_gc));
|
||||
|
||||
/* * XXX doesn't handle deletion */
|
||||
|
||||
@ -302,7 +302,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
|
||||
int bch2_trigger_stripe(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old, struct bkey_s _new,
|
||||
unsigned flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
struct bkey_s_c new = _new.s_c;
|
||||
struct bch_fs *c = trans->c;
|
||||
@ -312,7 +312,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
|
||||
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
|
||||
? bkey_s_c_to_stripe(new).v : NULL;
|
||||
|
||||
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
|
||||
if (flags & BTREE_TRIGGER_transactional) {
|
||||
/*
|
||||
* If the pointers aren't changing, we don't need to do anything:
|
||||
*/
|
||||
@ -371,7 +371,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & BTREE_TRIGGER_ATOMIC) {
|
||||
if (flags & BTREE_TRIGGER_atomic) {
|
||||
struct stripe *m = genradix_ptr(&c->stripes, idx);
|
||||
|
||||
if (!m) {
|
||||
@ -410,7 +410,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & BTREE_TRIGGER_GC) {
|
||||
if (flags & BTREE_TRIGGER_gc) {
|
||||
struct gc_stripe *m =
|
||||
genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
|
||||
|
||||
@ -769,7 +769,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
|
||||
int ret;
|
||||
|
||||
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
|
||||
POS(0, idx), BTREE_ITER_SLOTS);
|
||||
POS(0, idx), BTREE_ITER_slots);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1060,7 +1060,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
|
||||
int ret;
|
||||
|
||||
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1131,7 +1131,7 @@ static int ec_stripe_key_update(struct btree_trans *trans,
|
||||
int ret;
|
||||
|
||||
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
|
||||
new->k.p, BTREE_ITER_INTENT);
|
||||
new->k.p, BTREE_ITER_intent);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1189,7 +1189,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
|
||||
int ret, dev, block;
|
||||
|
||||
ret = bch2_get_next_backpointer(trans, bucket, gen,
|
||||
bp_pos, &bp, BTREE_ITER_CACHED);
|
||||
bp_pos, &bp, BTREE_ITER_cached);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (bpos_eq(*bp_pos, SPOS_MAX))
|
||||
@ -1214,7 +1214,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
|
||||
k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1937,7 +1937,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
|
||||
}
|
||||
|
||||
for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
|
||||
BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
|
||||
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
|
||||
if (start_pos.offset) {
|
||||
start_pos = min_pos;
|
||||
@ -2127,7 +2127,7 @@ int bch2_stripes_read(struct bch_fs *c)
|
||||
{
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k, ({
|
||||
BTREE_ITER_prefetch, k, ({
|
||||
if (k.k->type != KEY_TYPE_stripe)
|
||||
continue;
|
||||
|
||||
|
@ -13,7 +13,8 @@ int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
|
||||
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
|
||||
struct bkey_s_c);
|
||||
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s, unsigned);
|
||||
struct bkey_s_c, struct bkey_s,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
|
||||
.key_invalid = bch2_stripe_invalid, \
|
||||
|
@ -72,7 +72,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
|
||||
|
||||
for_each_btree_key_norestart(trans, iter,
|
||||
BTREE_ID_reflink, POS(0, idx + offset),
|
||||
BTREE_ITER_SLOTS, r_k, ret2) {
|
||||
BTREE_ITER_slots, r_k, ret2) {
|
||||
if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
|
||||
break;
|
||||
|
||||
|
@ -42,7 +42,7 @@ int bch2_create_trans(struct btree_trans *trans,
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -70,7 +70,7 @@ int bch2_create_trans(struct btree_trans *trans,
|
||||
struct bch_subvolume s;
|
||||
|
||||
ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
|
||||
BTREE_ITER_CACHED, &s);
|
||||
BTREE_ITER_cached, &s);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -78,7 +78,7 @@ int bch2_create_trans(struct btree_trans *trans,
|
||||
}
|
||||
|
||||
ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -163,7 +163,7 @@ int bch2_create_trans(struct btree_trans *trans,
|
||||
name,
|
||||
dir_target,
|
||||
&dir_offset,
|
||||
BCH_HASH_SET_MUST_CREATE);
|
||||
STR_HASH_must_create);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -171,7 +171,7 @@ int bch2_create_trans(struct btree_trans *trans,
|
||||
new_inode->bi_dir_offset = dir_offset;
|
||||
}
|
||||
|
||||
inode_iter.flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
|
||||
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
|
||||
bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
|
||||
|
||||
ret = bch2_btree_iter_traverse(&inode_iter) ?:
|
||||
@ -198,7 +198,7 @@ int bch2_link_trans(struct btree_trans *trans,
|
||||
if (dir.subvol != inum.subvol)
|
||||
return -EXDEV;
|
||||
|
||||
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
|
||||
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -207,7 +207,7 @@ int bch2_link_trans(struct btree_trans *trans,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -223,7 +223,7 @@ int bch2_link_trans(struct btree_trans *trans,
|
||||
ret = bch2_dirent_create(trans, dir, &dir_hash,
|
||||
mode_to_type(inode_u->bi_mode),
|
||||
name, inum.inum, &dir_offset,
|
||||
BCH_HASH_SET_MUST_CREATE);
|
||||
STR_HASH_must_create);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -255,19 +255,19 @@ int bch2_unlink_trans(struct btree_trans *trans,
|
||||
struct bkey_s_c k;
|
||||
int ret;
|
||||
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
|
||||
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
dir_hash = bch2_hash_info_init(c, dir_u);
|
||||
|
||||
ret = bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
|
||||
name, &inum, BTREE_ITER_INTENT);
|
||||
name, &inum, BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -322,7 +322,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
|
||||
|
||||
ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
|
||||
&dir_hash, &dirent_iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
|
||||
BTREE_UPDATE_internal_snapshot_node) ?:
|
||||
bch2_inode_write(trans, &dir_iter, dir_u) ?:
|
||||
bch2_inode_write(trans, &inode_iter, inode_u);
|
||||
err:
|
||||
@ -363,7 +363,7 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p
|
||||
struct bkey_i_subvolume *s =
|
||||
bch2_bkey_get_mut_typed(trans, &iter,
|
||||
BTREE_ID_subvolumes, POS(0, subvol),
|
||||
BTREE_ITER_CACHED, subvolume);
|
||||
BTREE_ITER_cached, subvolume);
|
||||
int ret = PTR_ERR_OR_ZERO(s);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -394,7 +394,7 @@ int bch2_rename_trans(struct btree_trans *trans,
|
||||
int ret;
|
||||
|
||||
ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -403,7 +403,7 @@ int bch2_rename_trans(struct btree_trans *trans,
|
||||
if (dst_dir.inum != src_dir.inum ||
|
||||
dst_dir.subvol != src_dir.subvol) {
|
||||
ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -423,13 +423,13 @@ int bch2_rename_trans(struct btree_trans *trans,
|
||||
goto err;
|
||||
|
||||
ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inum,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (dst_inum.inum) {
|
||||
ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inum,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ static void bchfs_read(struct btree_trans *trans,
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
|
||||
SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
|
||||
BTREE_ITER_SLOTS);
|
||||
BTREE_ITER_slots);
|
||||
while (1) {
|
||||
struct bkey_s_c k;
|
||||
unsigned bytes, sectors, offset_into_extent;
|
||||
|
@ -254,7 +254,7 @@ static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
|
||||
|
||||
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
|
||||
SPOS(inum.inum, offset, snapshot),
|
||||
BTREE_ITER_SLOTS, k, err) {
|
||||
BTREE_ITER_slots, k, err) {
|
||||
if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
|
||||
break;
|
||||
|
||||
|
@ -214,7 +214,7 @@ int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
|
||||
|
||||
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
|
||||
SPOS(inum.inum, offset, snapshot),
|
||||
BTREE_ITER_SLOTS, k, ret) {
|
||||
BTREE_ITER_slots, k, ret) {
|
||||
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
|
||||
unsigned state = bkey_to_sector_state(k);
|
||||
|
||||
|
@ -594,7 +594,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
|
||||
POS(inode->v.i_ino, start_sector),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
|
||||
BTREE_ITER_slots|BTREE_ITER_intent);
|
||||
|
||||
while (!ret && bkey_lt(iter.pos, end_pos)) {
|
||||
s64 i_sectors_delta = 0;
|
||||
@ -1009,7 +1009,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
|
||||
|
||||
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
|
||||
SPOS(inode->v.i_ino, offset >> 9, snapshot),
|
||||
BTREE_ITER_SLOTS, k, ret) {
|
||||
BTREE_ITER_slots, k, ret) {
|
||||
if (k.k->p.inode != inode->v.i_ino) {
|
||||
next_hole = bch2_seek_pagecache_hole(&inode->v,
|
||||
offset, MAX_LFS_FILESIZE, 0, false);
|
||||
|
@ -90,7 +90,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
|
||||
bch2_trans_begin(trans);
|
||||
|
||||
ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
|
||||
BTREE_ITER_INTENT) ?:
|
||||
BTREE_ITER_intent) ?:
|
||||
(set ? set(trans, inode, &inode_u, p) : 0) ?:
|
||||
bch2_inode_write(trans, &iter, &inode_u) ?:
|
||||
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
|
||||
@ -323,7 +323,7 @@ __bch2_create(struct mnt_idmap *idmap,
|
||||
inum.inum = inode_u.bi_inum;
|
||||
|
||||
ret = bch2_subvolume_get(trans, inum.subvol, true,
|
||||
BTREE_ITER_WITH_UPDATES, &subvol) ?:
|
||||
BTREE_ITER_with_updates, &subvol) ?:
|
||||
bch2_trans_commit(trans, NULL, &journal_seq, 0);
|
||||
if (unlikely(ret)) {
|
||||
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
|
||||
@ -783,7 +783,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
|
||||
acl = NULL;
|
||||
|
||||
ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
if (ret)
|
||||
goto btree_err;
|
||||
|
||||
|
@@ -79,7 +79,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
 			     POS(0, inode_nr),
-			     BTREE_ITER_ALL_SNAPSHOTS);
+			     BTREE_ITER_all_snapshots);
 	k = bch2_btree_iter_peek(&iter);
 	ret = bkey_err(k);
 	if (ret)

@@ -154,12 +154,12 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)

 	dir_hash_info = bch2_hash_info_init(c, &dir_inode);

-	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);

 	ret =   bch2_btree_iter_traverse(&iter) ?:
 		bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
 				    &dir_hash_info, &iter,
-				    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				    BTREE_UPDATE_internal_snapshot_node);
 	bch2_trans_iter_exit(trans, &iter);
 err:
 	bch_err_fn(c, ret);

@@ -274,9 +274,9 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
 				 &lostfound_str,
 				 lostfound->bi_inum,
 				 &lostfound->bi_dir_offset,
-				 BCH_HASH_SET_MUST_CREATE) ?:
+				 STR_HASH_must_create) ?:
 		bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
-				       BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				       BTREE_UPDATE_internal_snapshot_node);
 err:
 	bch_err_msg(c, ret, "creating lost+found");
 	bch2_trans_iter_exit(trans, &lostfound_iter);

@@ -333,7 +333,7 @@ static int reattach_inode(struct btree_trans *trans,
 				 &name,
 				 inode->bi_subvol ?: inode->bi_inum,
 				 &dir_offset,
-				 BCH_HASH_SET_MUST_CREATE);
+				 STR_HASH_must_create);
 	if (ret)
 		return ret;

@@ -708,7 +708,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
 	w->inodes.nr = 0;

 	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
-				     BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+				     BTREE_ITER_all_snapshots, k, ret) {
 		if (k.k->p.offset != inum)
 			break;

@@ -799,7 +799,7 @@ static int __get_visible_inodes(struct btree_trans *trans,
 	w->inodes.nr = 0;

 	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
-			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+			   BTREE_ITER_all_snapshots, k, ret) {
 		u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

 		if (k.k->p.offset != inum)

@@ -832,7 +832,7 @@ static int check_key_has_snapshot(struct btree_trans *trans,
 			"key in missing snapshot: %s",
 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
 		ret = bch2_btree_delete_at(trans, iter,
-					   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
+					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
 fsck_err:
 	printbuf_exit(&buf);
 	return ret;

@@ -861,8 +861,8 @@ static int hash_redo_key(struct btree_trans *trans,
 		bch2_hash_set_in_snapshot(trans, desc, hash_info,
 				       (subvol_inum) { 0, k.k->p.inode },
 				       k.k->p.snapshot, tmp,
-				       BCH_HASH_SET_MUST_CREATE,
-				       BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+				       STR_HASH_must_create|
+				       BTREE_UPDATE_internal_snapshot_node) ?:
 		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
 }
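
The hash_redo_key() hunk just above shows the second half of the cleanup in action: bch2_hash_set_in_snapshot() used to take a bch_str_hash_flags_t plus a separate btree update flags argument, and now takes a single word that callers build by OR-ing both kinds of bits together. A hedged sketch of that calling convention, again with invented EX_* names rather than the real ones:

/* Invented names; only the calling convention is the point here. */
enum ex_flags {
	EX_STR_HASH_must_create			= 1U << 0,
	EX_UPDATE_internal_snapshot_node	= 1U << 1,
};

static int ex_hash_set(enum ex_flags flags)
{
	if (flags & EX_STR_HASH_must_create) {
		/* insert-only behaviour: fail if the key already exists */
	}
	return 0;
}

static int ex_redo_key(void)
{
	/* str_hash policy and update behaviour travel in one argument: */
	return ex_hash_set(EX_STR_HASH_must_create |
			   EX_UPDATE_internal_snapshot_node);
}

This only works because the str_hash bits were given bits distinct from the iter/update/trigger bits when they were folded into the shared enum.
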
@@ -891,7 +891,7 @@ static int hash_check_key(struct btree_trans *trans,

 	for_each_btree_key_norestart(trans, iter, desc.btree_id,
 				     SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
-				     BTREE_ITER_SLOTS, k, ret) {
+				     BTREE_ITER_slots, k, ret) {
 		if (bkey_eq(k.k->p, hash_k.k->p))
 			break;

@@ -1233,7 +1233,7 @@ int bch2_check_inodes(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
 				POS_MIN,
-				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			check_inode(trans, &iter, k, &prev, &s, full)));

@@ -1362,8 +1362,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
 	BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));

 	bch2_trans_iter_init(trans, &iter1, btree, pos1,
-			     BTREE_ITER_ALL_SNAPSHOTS|
-			     BTREE_ITER_NOT_EXTENTS);
+			     BTREE_ITER_all_snapshots|
+			     BTREE_ITER_not_extents);
 	k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
 	ret = bkey_err(k1);
 	if (ret)

@@ -1425,7 +1425,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
 		trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);

 		ret =   bch2_trans_update_extent_overwrite(trans, old_iter,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
+				BTREE_UPDATE_internal_snapshot_node,
 				k1, k2) ?:
 			bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
 		bch2_disk_reservation_put(c, &res);

@@ -1625,7 +1625,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 			bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
 			ret =   bch2_btree_iter_traverse(&iter2) ?:
 				bch2_btree_delete_at(trans, &iter2,
-					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+					BTREE_UPDATE_internal_snapshot_node);
 			bch2_trans_iter_exit(trans, &iter2);
 			if (ret)
 				goto err;

@@ -1652,7 +1652,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 	bch_err_fn(c, ret);
 	return ret;
 delete:
-	ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+	ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
 	goto out;
 }

@@ -1673,7 +1673,7 @@ int bch2_check_extents(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
 				POS(BCACHEFS_ROOT_INO, 0),
-				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				&res, NULL,
 				BCH_TRANS_COMMIT_no_enospc, ({
 			bch2_disk_reservation_put(c, &res);

@@ -1698,7 +1698,7 @@ int bch2_check_indirect_extents(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
 				POS_MIN,
-				BTREE_ITER_PREFETCH, k,
+				BTREE_ITER_prefetch, k,
 				&res, NULL,
 				BCH_TRANS_COMMIT_no_enospc, ({
 			bch2_disk_reservation_put(c, &res);

@@ -2104,7 +2104,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 			(printbuf_reset(&buf),
 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
 		ret = bch2_btree_delete_at(trans, iter,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				BTREE_UPDATE_internal_snapshot_node);
 		goto out;
 	}

@@ -2191,7 +2191,7 @@ int bch2_check_dirents(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
 				POS(BCACHEFS_ROOT_INO, 0),
-				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
 				k,
 				NULL, NULL,
 				BCH_TRANS_COMMIT_no_enospc,

@@ -2255,7 +2255,7 @@ int bch2_check_xattrs(struct bch_fs *c)
 	ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
 			POS(BCACHEFS_ROOT_INO, 0),
-			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+			BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
 			k,
 			NULL, NULL,
 			BCH_TRANS_COMMIT_no_enospc,

@@ -2422,7 +2422,7 @@ int bch2_check_subvolume_structure(struct bch_fs *c)
 {
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter,
-				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			check_subvol_path(trans, &iter, k)));
 	bch_err_fn(c, ret);

@@ -2559,9 +2559,9 @@ int bch2_check_directory_structure(struct bch_fs *c)

 	ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
-					  BTREE_ITER_INTENT|
-					  BTREE_ITER_PREFETCH|
-					  BTREE_ITER_ALL_SNAPSHOTS, k,
+					  BTREE_ITER_intent|
+					  BTREE_ITER_prefetch|
+					  BTREE_ITER_all_snapshots, k,
 					  NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
 			if (!bkey_is_inode(k.k))
 				continue;

@@ -2661,9 +2661,9 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
 	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter, BTREE_ID_inodes,
 				   POS(0, start),
-				   BTREE_ITER_INTENT|
-				   BTREE_ITER_PREFETCH|
-				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
+				   BTREE_ITER_intent|
+				   BTREE_ITER_prefetch|
+				   BTREE_ITER_all_snapshots, k, ({
 			if (!bkey_is_inode(k.k))
 				continue;

@@ -2704,9 +2704,9 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links

 	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
-				   BTREE_ITER_INTENT|
-				   BTREE_ITER_PREFETCH|
-				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
+				   BTREE_ITER_intent|
+				   BTREE_ITER_prefetch|
+				   BTREE_ITER_all_snapshots, k, ({
 			ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
 			if (ret)
 				break;

@@ -2781,7 +2781,7 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c,
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
 				POS(0, range_start),
-				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+				BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
 	if (ret < 0) {

@@ -2849,7 +2849,7 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
 	u->v.front_pad	= 0;
 	u->v.back_pad	= 0;

-	return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
+	return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
 }

 int bch2_fix_reflink_p(struct bch_fs *c)

@@ -2860,8 +2860,8 @@ int bch2_fix_reflink_p(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter,
 				BTREE_ID_extents, POS_MIN,
-				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
-				BTREE_ITER_ALL_SNAPSHOTS, k,
+				BTREE_ITER_intent|BTREE_ITER_prefetch|
+				BTREE_ITER_all_snapshots, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			fix_reflink_p_key(trans, &iter, k)));
 	bch_err_fn(c, ret);
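
The fix_reflink_p hunks above update keys with BTREE_TRIGGER_norun (formerly BTREE_TRIGGER_NORUN), the flag repair paths use to rewrite a key without re-running its triggers. A small sketch of the shape of that guard, using illustrative names rather than bcachefs's real trigger dispatch:

/* Illustrative names only; this is just the shape of the norun check. */
enum ex_trigger_flags {
	EX_TRIGGER_norun = 1U << 0,
};

static int ex_run_trigger(enum ex_trigger_flags flags)
{
	if (flags & EX_TRIGGER_norun)
		return 0;	/* caller asked for no trigger side effects */

	/* ... normal trigger work would run here ... */
	return 0;
}
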
@@ -339,7 +339,7 @@ int bch2_inode_peek_nowarn(struct btree_trans *trans,

 	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
 			       SPOS(0, inum.inum, snapshot),
-			       flags|BTREE_ITER_CACHED);
+			       flags|BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret)
 		return ret;

@@ -371,7 +371,7 @@ int bch2_inode_peek(struct btree_trans *trans,
 int bch2_inode_write_flags(struct btree_trans *trans,
 		     struct btree_iter *iter,
 		     struct bch_inode_unpacked *inode,
-		     enum btree_update_flags flags)
+		     enum btree_iter_update_trigger_flags flags)
 {
 	struct bkey_inode_buf *inode_p;

@@ -399,7 +399,7 @@ int __bch2_fsck_write_inode(struct btree_trans *trans,

 	return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
 				&inode_p->inode.k_i,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				BTREE_UPDATE_internal_snapshot_node);
 }

 int bch2_fsck_write_inode(struct btree_trans *trans,

@@ -598,7 +598,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
 {
 	s64 nr = (s64) bkey_is_inode(new.k) - (s64) bkey_is_inode(old.k);

-	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+	if (flags & BTREE_TRIGGER_transactional) {
 		if (nr) {
 			int ret = bch2_replicas_deltas_realloc(trans, 0);
 			if (ret)

@@ -617,13 +617,13 @@ int bch2_trigger_inode(struct btree_trans *trans,
 		}
 	}

-	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
 		BUG_ON(!trans->journal_res.seq);

 		bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
 	}

-	if (flags & BTREE_TRIGGER_GC) {
+	if (flags & BTREE_TRIGGER_gc) {
 		struct bch_fs *c = trans->c;

 		percpu_down_read(&c->mark_lock);
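
The bch2_trigger_inode() hunks above show the three trigger phases being tested on the same flags word: transactional (inside the btree transaction, may fail and restart), atomic (at commit time, must not fail), and gc. A compressed, hedged sketch of that dispatch with stand-in names:

/* Stand-in for a trigger like bch2_trigger_inode(); names are invented.
 * Phase bits are not mutually exclusive with operation bits like insert. */
enum ex_trigger_phase {
	EX_TRIGGER_transactional	= 1U << 0,
	EX_TRIGGER_atomic		= 1U << 1,
	EX_TRIGGER_gc			= 1U << 2,
	EX_TRIGGER_insert		= 1U << 3,
};

static int ex_trigger(unsigned flags)
{
	if (flags & EX_TRIGGER_transactional) {
		/* runs inside the transaction; may allocate, may error */
	}

	if ((flags & EX_TRIGGER_atomic) && (flags & EX_TRIGGER_insert)) {
		/* runs at commit time, e.g. stamping the journal seq */
	}

	if (flags & EX_TRIGGER_gc) {
		/* runs from gc to rebuild in-memory accounting */
	}

	return 0;
}
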
@@ -752,8 +752,8 @@ int bch2_inode_create(struct btree_trans *trans,

 	pos = start;
 	bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
-			     BTREE_ITER_ALL_SNAPSHOTS|
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_all_snapshots|
+			     BTREE_ITER_intent);
again:
 	while ((k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k)) &&

@@ -814,7 +814,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
 	 * extent iterator:
 	 */
 	bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);

 	while (1) {
 		bch2_trans_begin(trans);

@@ -836,7 +836,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
 		bkey_init(&delete.k);
 		delete.k.p = iter.pos;

-		if (iter.flags & BTREE_ITER_IS_EXTENTS)
+		if (iter.flags & BTREE_ITER_is_extents)
 			bch2_key_resize(&delete.k,
 					bpos_min(end, k.k->p).offset -
 					iter.pos.offset);

@@ -885,7 +885,7 @@ int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)

 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
 			       SPOS(0, inum.inum, snapshot),
-			       BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+			       BTREE_ITER_intent|BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;

@@ -1045,7 +1045,7 @@ int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
 	bch2_trans_begin(trans);

 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
-			       SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
+			       SPOS(0, inum, snapshot), BTREE_ITER_intent);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;

@@ -1090,7 +1090,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
 	struct bch_inode_unpacked inode;
 	int ret;

-	k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
+	k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret)
 		return ret;

@@ -1142,7 +1142,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
 		inode.bi_flags &= ~BCH_INODE_unlinked;

 		ret = bch2_inode_write_flags(trans, &inode_iter, &inode,
-					     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+					     BTREE_UPDATE_internal_snapshot_node);
 		bch_err_msg(c, ret, "clearing inode unlinked flag");
 		if (ret)
 			goto out;

@@ -1189,7 +1189,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
 	 * flushed and we'd spin:
 	 */
 	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
-					BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+					BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 					NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
 		ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass);
 		if (ret > 0) {

@@ -101,7 +101,7 @@ int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
 		    struct bch_inode_unpacked *, subvol_inum, unsigned);

 int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
-			   struct bch_inode_unpacked *, enum btree_update_flags);
+			   struct bch_inode_unpacked *, enum btree_iter_update_trigger_flags);

 static inline int bch2_inode_write(struct btree_trans *trans,
 		     struct btree_iter *iter,

@@ -198,7 +198,7 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 			     POS(inum.inum, start),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);

 	ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);

@@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
 	struct bch_inode_unpacked inode_u;
 	int ret;

-	ret   = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
+	ret   = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent) ?:
 		(inode_u.bi_size = new_i_size, 0) ?:
 		bch2_inode_write(trans, &iter, &inode_u);

@@ -256,7 +256,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,

 	bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
 			     POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);
 	ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
 	bch2_trans_iter_exit(trans, &fpunch_iter);

@@ -317,7 +317,7 @@ static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset
 	offset	<<= 9;
 	len	<<= 9;

-	ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
+	ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent);
 	if (ret)
 		return ret;

@@ -365,7 +365,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 			     POS(inum.inum, 0),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);

 	switch (op->v.state) {
 	case LOGGED_OP_FINSERT_start:

@@ -378,7 +378,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
 	bch2_bkey_buf_init(&sk);

 	bch2_trans_iter_init(trans, &iter, rbio->data_btree,
-			     rbio->read_pos, BTREE_ITER_SLOTS);
+			     rbio->read_pos, BTREE_ITER_slots);
retry:
 	rbio->bio.bi_status = 0;

@@ -487,7 +487,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
 		return 0;

 	k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
-			       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+			       BTREE_ITER_slots|BTREE_ITER_intent);
 	if ((ret = bkey_err(k)))
 		goto out;

@@ -523,7 +523,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
 		goto out;

 	ret = bch2_trans_update(trans, &iter, new,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				BTREE_UPDATE_internal_snapshot_node);
out:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;

@@ -769,7 +769,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
 			     PTR_BUCKET_POS(c, &ptr),
-			     BTREE_ITER_CACHED);
+			     BTREE_ITER_cached);

 	prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
 	printbuf_indent_add(&buf, 2);

@@ -1112,7 +1112,7 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 			     SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
-			     BTREE_ITER_SLOTS);
+			     BTREE_ITER_slots);
 	while (1) {
 		unsigned bytes, sectors, offset_into_extent;
 		enum btree_id data_btree = BTREE_ID_extents;

@@ -166,7 +166,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
 	bch2_trans_copy_iter(&iter, extent_iter);

 	for_each_btree_key_upto_continue_norestart(iter,
-				new->k.p, BTREE_ITER_SLOTS, old, ret) {
+				new->k.p, BTREE_ITER_slots, old, ret) {
 		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
 			max(bkey_start_offset(&new->k),
 			    bkey_start_offset(old.k));

@@ -210,14 +210,14 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 	 * to be journalled - if we crash, the bi_journal_seq update will be
 	 * lost, but that's fine.
 	 */
-	unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
+	unsigned inode_update_flags = BTREE_UPDATE_nojournal;

 	struct btree_iter iter;
 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
 			      SPOS(0,
 				   extent_iter->pos.inode,
 				   extent_iter->snapshot),
-			      BTREE_ITER_CACHED);
+			      BTREE_ITER_cached);
 	int ret = bkey_err(k);
 	if (unlikely(ret))
 		return ret;

@@ -259,7 +259,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 	}

 	ret = bch2_trans_update(trans, &iter, &inode->k_i,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+				BTREE_UPDATE_internal_snapshot_node|
 				inode_update_flags);
err:
 	bch2_trans_iter_exit(trans, &iter);

@@ -368,7 +368,7 @@ static int bch2_write_index_default(struct bch_write_op *op)

 		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 				     bkey_start_pos(&sk.k->k),
-				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+				     BTREE_ITER_slots|BTREE_ITER_intent);

 		ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
 			bch2_extent_update(trans, inum, &iter, sk.k,

@@ -1158,7 +1158,7 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
 	return  bch2_extent_update_i_size_sectors(trans, iter,
 			min(new->k.p.offset << 9, new_i_size), 0) ?:
 		bch2_trans_update(trans, iter, new,
-				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+				  BTREE_UPDATE_internal_snapshot_node);
 }

 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)

@@ -1169,7 +1169,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
 	for_each_keylist_key(&op->insert_keys, orig) {
 		int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
 				     bkey_start_pos(&orig->k), orig->k.p,
-				     BTREE_ITER_INTENT, k,
+				     BTREE_ITER_intent, k,
 				     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
 			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
 		}));

@@ -1242,7 +1242,7 @@ static void bch2_nocow_write(struct bch_write_op *op)

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 			     SPOS(op->pos.inode, op->pos.offset, snapshot),
-			     BTREE_ITER_SLOTS);
+			     BTREE_ITER_slots);
 	while (1) {
 		struct bio *bio = &op->wbio.bio;

@@ -946,7 +946,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 			ret = bch2_trans_run(c,
 				bch2_trans_mark_metadata_bucket(trans, ca,
 						ob[nr_got]->bucket, BCH_DATA_journal,
-						ca->mi.bucket_size, BTREE_TRIGGER_TRANSACTIONAL));
+						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
 			if (ret) {
 				bch2_open_bucket_put(c, ob[nr_got]);
 				bch_err_msg(c, ret, "marking new journal buckets");

@@ -1027,7 +1027,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 			bch2_trans_run(c,
 				bch2_trans_mark_metadata_bucket(trans, ca,
 						bu[i], BCH_DATA_free, 0,
-						BTREE_TRIGGER_TRANSACTIONAL));
+						BTREE_TRIGGER_transactional));
err_free:
 	if (!new_fs)
 		for (i = 0; i < nr_got; i++)

@@ -233,7 +233,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
 		struct btree *b;

 		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
-					  0, 0, BTREE_ITER_PREFETCH);
+					  0, 0, BTREE_ITER_prefetch);
retry:
 		bch2_trans_begin(trans);

@@ -56,7 +56,7 @@ int bch2_resume_logged_ops(struct bch_fs *c)
 	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter,
 				   BTREE_ID_logged_ops, POS_MIN,
-				   BTREE_ITER_PREFETCH, k,
+				   BTREE_ITER_prefetch, k,
 			resume_logged_op(trans, &iter, k)));
 	bch_err_fn(c, ret);
 	return ret;

@@ -149,7 +149,7 @@ int bch2_check_lrus(struct bch_fs *c)
 	struct bpos last_flushed_pos = POS_MIN;
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter,
-				BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
+				BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
 			bch2_check_lru_key(trans, &iter, k, &last_flushed_pos)));
 	bch_err_fn(c, ret);

@@ -49,7 +49,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
 	if (!bch2_bkey_has_device_c(k, dev_idx))
 		return 0;

-	n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+	n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_internal_snapshot_node);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;

@@ -67,7 +67,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,

 	/*
 	 * Since we're not inserting through an extent iterator
-	 * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
 	 * we aren't using the extent overwrite path to delete, we're
 	 * just using the normal key deletion path:
 	 */

@@ -87,7 +87,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 			continue;

 		ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
-				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
 		if (ret)

@@ -119,7 +119,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)

 	for (id = 0; id < BTREE_ID_NR; id++) {
 		bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
-					  BTREE_ITER_PREFETCH);
+					  BTREE_ITER_prefetch);
retry:
 		ret = 0;
 		while (bch2_trans_begin(trans),

@@ -416,7 +416,7 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
 	io_opts->d.nr = 0;

 	ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
-				 BTREE_ITER_ALL_SNAPSHOTS, k, ({
+				 BTREE_ITER_all_snapshots, k, ({
 		if (k.k->p.offset != extent_k.k->p.inode)
 			break;

@@ -462,7 +462,7 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,

 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
 			       SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
-			       BTREE_ITER_CACHED);
+			       BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		return ret;

@@ -548,8 +548,8 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
 	}

 	bch2_trans_iter_init(trans, &iter, btree_id, start,
-			     BTREE_ITER_PREFETCH|
-			     BTREE_ITER_ALL_SNAPSHOTS);
+			     BTREE_ITER_prefetch|
+			     BTREE_ITER_all_snapshots);

 	if (ctxt->rate)
 		bch2_ratelimit_reset(ctxt->rate);

@@ -700,7 +700,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
 		bch2_trans_begin(trans);

 		bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-				     bucket, BTREE_ITER_CACHED);
+				     bucket, BTREE_ITER_cached);
 		ret = lockrestart_do(trans,
 				bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
 		bch2_trans_iter_exit(trans, &iter);

@@ -727,7 +727,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,

 		ret = bch2_get_next_backpointer(trans, bucket, gen,
 						&bp_pos, &bp,
-						BTREE_ITER_CACHED);
+						BTREE_ITER_cached);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)

@@ -863,7 +863,7 @@ static int bch2_move_btree(struct bch_fs *c,
 			continue;

 		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
-					  BTREE_ITER_PREFETCH);
+					  BTREE_ITER_prefetch);
retry:
 		ret = 0;
 		while (bch2_trans_begin(trans),

@@ -84,7 +84,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 		return 0;

 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
-			       b->k.bucket, BTREE_ITER_CACHED);
+			       b->k.bucket, BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret)
 		return ret;

@@ -480,7 +480,7 @@ enum fsck_err_opts {
 	  OPT_FS|OPT_MOUNT|OPT_RUNTIME,					\
 	  OPT_BOOL(),							\
 	  BCH2_NO_SB_OPT,		true,				\
-	  NULL,		"BTREE_ITER_PREFETCH casuse btree nodes to be\n"\
+	  NULL,		"BTREE_ITER_prefetch casuse btree nodes to be\n"\
 	  " prefetched sequentially")

struct bch_opts {

@@ -536,10 +536,10 @@ int bch2_fs_quota_read(struct bch_fs *c)

 	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
-				   BTREE_ITER_PREFETCH, k,
+				   BTREE_ITER_prefetch, k,
 			__bch2_quota_set(c, k, NULL)) ?:
 		for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
-				   BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+				   BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 			bch2_fs_quota_read_inode(trans, &iter, k)));
 	bch_err_fn(c, ret);
 	return ret;

@@ -826,7 +826,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
 	int ret;

 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
-			       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+			       BTREE_ITER_slots|BTREE_ITER_intent);
 	ret = bkey_err(k);
 	if (unlikely(ret))
 		return ret;

@@ -42,7 +42,7 @@ static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
 			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);
 	k = bch2_btree_iter_peek_slot(&iter);
 	ret = bkey_err(k);
 	if (ret)

@@ -89,7 +89,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
 			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);
 	k = bch2_btree_iter_peek_slot(&iter);
 	ret = bkey_err(k);
 	if (ret)

@@ -140,7 +140,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
 	bch2_trans_iter_init(trans, extent_iter,
 			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
 			     work_pos,
-			     BTREE_ITER_ALL_SNAPSHOTS);
+			     BTREE_ITER_all_snapshots);
 	k = bch2_btree_iter_peek_slot(extent_iter);
 	if (bkey_err(k))
 		return k;

@@ -328,7 +328,7 @@ static int do_rebalance(struct moving_context *ctxt)

 	bch2_trans_iter_init(trans, &rebalance_work_iter,
 			     BTREE_ID_rebalance_work, POS_MIN,
-			     BTREE_ITER_ALL_SNAPSHOTS);
+			     BTREE_ITER_all_snapshots);

 	while (!bch2_move_ratelimit(ctxt)) {
 		if (!r->enabled) {

@@ -136,9 +136,9 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
 {
 	struct btree_iter iter;
 	unsigned iter_flags =
-		BTREE_ITER_INTENT|
-		BTREE_ITER_NOT_EXTENTS;
-	unsigned update_flags = BTREE_TRIGGER_NORUN;
+		BTREE_ITER_intent|
+		BTREE_ITER_not_extents;
+	unsigned update_flags = BTREE_TRIGGER_norun;
 	int ret;

 	if (k->overwritten)

@@ -147,17 +147,17 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
 	trans->journal_res.seq = k->journal_seq;

 	/*
-	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
+	 * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
 	 * keep the key cache coherent with the underlying btree. Nothing
 	 * besides the allocator is doing updates yet so we don't need key cache
 	 * coherency for non-alloc btrees, and key cache fills for snapshots
-	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
+	 * btrees use BTREE_ITER_filter_snapshots, which isn't available until
 	 * the snapshots recovery pass runs.
 	 */
 	if (!k->level && k->btree_id == BTREE_ID_alloc)
-		iter_flags |= BTREE_ITER_CACHED;
+		iter_flags |= BTREE_ITER_cached;
 	else
-		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;
+		update_flags |= BTREE_UPDATE_key_cache_reclaim;

 	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
 				  BTREE_MAX_DEPTH, k->level,
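
The bch2_journal_replay_key() hunk above is a good example of what the unified enum enables: iterator and update flags from the same namespace are assembled conditionally. Alloc-leaf keys are replayed through the key cache, while everything else bypasses it and sets BTREE_UPDATE_key_cache_reclaim to keep the cache coherent. A self-contained paraphrase of that selection, with illustrative EX_* names:

enum ex_flags {
	EX_ITER_intent			= 1U << 0,
	EX_ITER_not_extents		= 1U << 1,
	EX_ITER_cached			= 1U << 2,
	EX_TRIGGER_norun		= 1U << 3,
	EX_UPDATE_key_cache_reclaim	= 1U << 4,
};

/* Paraphrase of the replay-time flag selection above; btree id 0 stands
 * in for the alloc btree. */
static void ex_replay_key_flags(unsigned level, unsigned btree_id,
				unsigned *iter_flags, unsigned *update_flags)
{
	*iter_flags	= EX_ITER_intent|EX_ITER_not_extents;
	*update_flags	= EX_TRIGGER_norun;

	if (!level && btree_id == 0)
		*iter_flags |= EX_ITER_cached;
	else
		*update_flags |= EX_UPDATE_key_cache_reclaim;
}
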
@@ -74,20 +74,20 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
 }

 static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
-			struct bkey_s_c_reflink_p p,
-			u64 *idx, unsigned flags)
+			struct bkey_s_c_reflink_p p, u64 *idx,
+			enum btree_iter_update_trigger_flags flags)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_i *k;
 	__le64 *refcount;
-	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+	int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
 	struct printbuf buf = PRINTBUF;
 	int ret;

 	k = bch2_bkey_get_mut_noupdate(trans, &iter,
 			BTREE_ID_reflink, POS(0, *idx),
-			BTREE_ITER_WITH_UPDATES);
+			BTREE_ITER_with_updates);
 	ret = PTR_ERR_OR_ZERO(k);
 	if (ret)
 		goto err;

@@ -102,7 +102,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
 		goto err;
 	}

-	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
+	if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
 		bch2_bkey_val_to_text(&buf, c, p.s_c);
 		bch2_trans_inconsistent(trans,
 			"indirect extent refcount underflow at %llu while marking\n  %s",

@@ -111,7 +111,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
 		goto err;
 	}

-	if (flags & BTREE_TRIGGER_INSERT) {
+	if (flags & BTREE_TRIGGER_insert) {
 		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
 		u64 pad;

@@ -141,12 +141,13 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
 }

 static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
-			struct bkey_s_c_reflink_p p,
-			u64 *idx, unsigned flags, size_t r_idx)
+			struct bkey_s_c_reflink_p p, u64 *idx,
+			enum btree_iter_update_trigger_flags flags,
+			size_t r_idx)
 {
 	struct bch_fs *c = trans->c;
 	struct reflink_gc *r;
-	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+	int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
 	u64 start = le64_to_cpu(p.v->idx);
 	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
 	u64 next_idx = end + le32_to_cpu(p.v->back_pad);

@@ -189,7 +190,7 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
 			set_bkey_val_u64s(&update->k, 0);
 		}

-		ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_NORUN);
+		ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun);
 	}

 	*idx = next_idx;

@@ -200,8 +201,8 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
 }

 static int __trigger_reflink_p(struct btree_trans *trans,
-			enum btree_id btree_id, unsigned level,
-			struct bkey_s_c k, unsigned flags)
+			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
+			enum btree_iter_update_trigger_flags flags)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

@@ -210,12 +211,12 @@ static int __trigger_reflink_p(struct btree_trans *trans,
 	u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
 	u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);

-	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+	if (flags & BTREE_TRIGGER_transactional) {
 		while (idx < end && !ret)
 			ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
 	}

-	if (flags & BTREE_TRIGGER_GC) {
+	if (flags & BTREE_TRIGGER_gc) {
 		size_t l = 0, r = c->reflink_gc_nr;

 		while (l < r) {
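
Note that the signature changes in this file swap a bare `unsigned flags` for the named `enum btree_iter_update_trigger_flags`, so the prototype itself now documents which flag namespace the word carries. A trivial sketch of the difference, with illustrative names once more:

enum ex_iter_update_trigger_flags {
	EX_TRIGGER_overwrite = 1U << 0,
};

/* before: the type says nothing about what the bits mean */
static int ex_trigger_old(unsigned flags)
{
	return !(flags & EX_TRIGGER_overwrite) ? 1 : -1;
}

/* after: the parameter type names the flag namespace */
static int ex_trigger_new(enum ex_iter_update_trigger_flags flags)
{
	return !(flags & EX_TRIGGER_overwrite) ? 1 : -1;
}
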
@ -238,10 +239,10 @@ int bch2_trigger_reflink_p(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old,
|
||||
struct bkey_s new,
|
||||
unsigned flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
|
||||
(flags & BTREE_TRIGGER_INSERT)) {
|
||||
if ((flags & BTREE_TRIGGER_transactional) &&
|
||||
(flags & BTREE_TRIGGER_insert)) {
|
||||
struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;
|
||||
|
||||
v->front_pad = v->back_pad = 0;
|
||||
@ -283,21 +284,21 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
|
||||
|
||||
static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *flags)
|
||||
{
|
||||
if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
|
||||
if ((*flags & BTREE_TRIGGER_insert) && !*bkey_refcount(new)) {
|
||||
new.k->type = KEY_TYPE_deleted;
|
||||
new.k->size = 0;
|
||||
set_bkey_val_u64s(new.k, 0);
|
||||
*flags &= ~BTREE_TRIGGER_INSERT;
|
||||
*flags &= ~BTREE_TRIGGER_insert;
|
||||
}
|
||||
}
|
||||
|
||||
int bch2_trigger_reflink_v(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old, struct bkey_s new,
|
||||
unsigned flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
|
||||
(flags & BTREE_TRIGGER_INSERT))
|
||||
if ((flags & BTREE_TRIGGER_transactional) &&
|
||||
(flags & BTREE_TRIGGER_insert))
|
||||
check_indirect_extent_deleting(new, &flags);
|
||||
|
||||
return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
|
||||
@ -349,7 +350,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
|
||||
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
|
||||
|
||||
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
k = bch2_btree_iter_peek_prev(&reflink_iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
@ -394,7 +395,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
|
||||
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
|
||||
|
||||
ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
BTREE_UPDATE_internal_snapshot_node);
|
||||
err:
|
||||
bch2_trans_iter_exit(trans, &reflink_iter);
|
||||
|
||||
@ -455,9 +456,9 @@ s64 bch2_remap_range(struct bch_fs *c,
|
||||
goto err;
|
||||
|
||||
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
|
||||
while ((ret == 0 ||
|
||||
bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
|
||||
@ -567,7 +568,7 @@ s64 bch2_remap_range(struct bch_fs *c,
|
||||
bch2_trans_begin(trans);
|
||||
|
||||
ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
|
||||
dst_inum, BTREE_ITER_INTENT);
|
||||
dst_inum, BTREE_ITER_intent);
|
||||
|
||||
if (!ret2 &&
|
||||
inode_u.bi_size < new_i_size) {
|
||||
|
@ -10,7 +10,8 @@ void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
|
||||
struct bkey_s_c);
|
||||
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
|
||||
int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s, unsigned);
|
||||
struct bkey_s_c, struct bkey_s,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
|
||||
.key_invalid = bch2_reflink_p_invalid, \
|
||||
@ -25,7 +26,8 @@ int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
|
||||
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
|
||||
struct bkey_s_c);
|
||||
int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s, unsigned);
|
||||
struct bkey_s_c, struct bkey_s,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
|
||||
.key_invalid = bch2_reflink_v_invalid, \
|
||||
|
@ -49,7 +49,7 @@ int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
|
||||
struct bch_snapshot_tree *s)
|
||||
{
|
||||
int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
|
||||
BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
|
||||
BTREE_ITER_with_updates, snapshot_tree, s);
|
||||
|
||||
if (bch2_err_matches(ret, ENOENT))
|
||||
ret = -BCH_ERR_ENOENT_snapshot_tree;
|
||||
@ -361,7 +361,7 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
|
||||
struct bch_snapshot *s)
|
||||
{
|
||||
return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
|
||||
BTREE_ITER_WITH_UPDATES, snapshot, s);
|
||||
BTREE_ITER_with_updates, snapshot, s);
|
||||
}
|
||||
|
||||
static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
|
||||
@ -618,7 +618,7 @@ int bch2_check_snapshot_trees(struct bch_fs *c)
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_snapshot_trees, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_snapshot_tree(trans, &iter, k)));
|
||||
bch_err_fn(c, ret);
|
||||
@ -695,7 +695,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
|
||||
|
||||
root = bch2_bkey_get_iter_typed(trans, &root_iter,
|
||||
BTREE_ID_snapshots, POS(0, root_id),
|
||||
BTREE_ITER_WITH_UPDATES, snapshot);
|
||||
BTREE_ITER_with_updates, snapshot);
|
||||
ret = bkey_err(root);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -886,7 +886,7 @@ int bch2_check_snapshots(struct bch_fs *c)
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_reverse_commit(trans, iter,
|
||||
BTREE_ID_snapshots, POS_MAX,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_snapshot(trans, &iter, k)));
|
||||
bch_err_fn(c, ret);
|
||||
@ -1001,7 +1001,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
|
||||
r.btree = btree;
|
||||
|
||||
ret = for_each_btree_key(trans, iter, btree, POS_MIN,
|
||||
BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_PREFETCH, k, ({
|
||||
BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
|
||||
get_snapshot_trees(c, &r, k.k->p);
|
||||
}));
|
||||
if (ret)
|
||||
@ -1090,7 +1090,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
|
||||
int ret = 0;
|
||||
|
||||
s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
|
||||
BTREE_ITER_INTENT, snapshot);
|
||||
BTREE_ITER_intent, snapshot);
|
||||
ret = bkey_err(s);
|
||||
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
|
||||
"missing snapshot %u", id);
|
||||
@ -1199,7 +1199,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
|
||||
int ret;
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
|
||||
POS_MIN, BTREE_ITER_INTENT);
|
||||
POS_MIN, BTREE_ITER_intent);
|
||||
k = bch2_btree_iter_peek(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
@ -1367,7 +1367,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
|
||||
if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
|
||||
snapshot_list_has_id(equiv_seen, equiv)) {
|
||||
return bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
BTREE_UPDATE_internal_snapshot_node);
|
||||
} else {
|
||||
return snapshot_list_add(c, equiv_seen, equiv);
|
||||
}
|
||||
@ -1404,15 +1404,15 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
|
||||
new->k.p.snapshot = equiv;
|
||||
|
||||
bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
|
||||
BTREE_ITER_ALL_SNAPSHOTS|
|
||||
BTREE_ITER_CACHED|
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_all_snapshots|
|
||||
BTREE_ITER_cached|
|
||||
BTREE_ITER_intent);
|
||||
|
||||
ret = bch2_btree_iter_traverse(&new_iter) ?:
|
||||
bch2_trans_update(trans, &new_iter, new,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
|
||||
BTREE_UPDATE_internal_snapshot_node) ?:
|
||||
bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
BTREE_UPDATE_internal_snapshot_node);
|
||||
bch2_trans_iter_exit(trans, &new_iter);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1603,12 +1603,12 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
|
||||
|
||||
ret = for_each_btree_key_commit(trans, iter,
|
||||
id, POS_MIN,
|
||||
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
|
||||
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
|
||||
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
id, POS_MIN,
|
||||
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
|
||||
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
|
||||
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
move_key_to_correct_snapshot(trans, &iter, k));
|
||||
|
||||
@ -1643,7 +1643,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
|
||||
* nodes some depth fields will be off:
|
||||
*/
|
||||
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
|
||||
BTREE_ITER_INTENT, k,
|
||||
BTREE_ITER_intent, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
|
||||
if (ret)
|
||||
@ -1699,8 +1699,8 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
|
||||
int ret;
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, id, pos,
|
||||
BTREE_ITER_NOT_EXTENTS|
|
||||
BTREE_ITER_ALL_SNAPSHOTS);
|
||||
BTREE_ITER_not_extents|
|
||||
BTREE_ITER_all_snapshots);
|
||||
while (1) {
|
||||
k = bch2_btree_iter_prev(&iter);
|
||||
ret = bkey_err(k);
|
||||
@ -1752,7 +1752,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
|
||||
|
||||
pos.snapshot = leaf_id;
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
|
||||
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
|
@ -15,16 +15,6 @@
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/sha2.h>
|
||||
|
||||
typedef unsigned __bitwise bch_str_hash_flags_t;
|
||||
|
||||
enum bch_str_hash_flags {
|
||||
__BCH_HASH_SET_MUST_CREATE,
|
||||
__BCH_HASH_SET_MUST_REPLACE,
|
||||
};
|
||||
|
||||
#define BCH_HASH_SET_MUST_CREATE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_CREATE)
|
||||
#define BCH_HASH_SET_MUST_REPLACE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_REPLACE)
|
||||
|
||||
static inline enum bch_str_hash_type
|
||||
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
|
||||
{
|
||||
@ -165,7 +155,8 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
|
||||
const struct bch_hash_desc desc,
|
||||
const struct bch_hash_info *info,
|
||||
subvol_inum inum, const void *key,
|
||||
unsigned flags, u32 snapshot)
|
||||
enum btree_iter_update_trigger_flags flags,
|
||||
u32 snapshot)
|
||||
{
|
||||
struct bkey_s_c k;
|
||||
int ret;
|
||||
@ -173,7 +164,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
|
||||
for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
|
||||
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
|
||||
POS(inum.inum, U64_MAX),
|
||||
BTREE_ITER_SLOTS|flags, k, ret) {
|
||||
BTREE_ITER_slots|flags, k, ret) {
|
||||
if (is_visible_key(desc, inum, k)) {
|
||||
if (!desc.cmp_key(k, key))
|
||||
return k;
|
||||
@ -195,7 +186,7 @@ bch2_hash_lookup(struct btree_trans *trans,
|
||||
const struct bch_hash_desc desc,
|
||||
const struct bch_hash_info *info,
|
||||
subvol_inum inum, const void *key,
|
||||
unsigned flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
u32 snapshot;
|
||||
int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
|
||||
@ -223,7 +214,7 @@ bch2_hash_hole(struct btree_trans *trans,
|
||||
for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
|
||||
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
|
||||
POS(inum.inum, U64_MAX),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret)
|
||||
BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
|
||||
if (!is_visible_key(desc, inum, k))
|
||||
return 0;
|
||||
bch2_trans_iter_exit(trans, iter);
|
||||
@ -245,7 +236,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
|
||||
|
||||
bch2_btree_iter_advance(&iter);
|
||||
|
||||
for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, k, ret) {
|
||||
for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
|
||||
if (k.k->type != desc.key_type &&
|
||||
k.k->type != KEY_TYPE_hash_whiteout)
|
||||
break;
|
||||
@ -267,8 +258,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
|
||||
const struct bch_hash_info *info,
|
||||
subvol_inum inum, u32 snapshot,
|
||||
struct bkey_i *insert,
|
||||
bch_str_hash_flags_t str_hash_flags,
|
||||
int update_flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
struct btree_iter iter, slot = { NULL };
|
||||
struct bkey_s_c k;
|
||||
@ -280,7 +270,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
|
||||
desc.hash_bkey(info, bkey_i_to_s_c(insert)),
|
||||
snapshot),
|
||||
POS(insert->k.p.inode, U64_MAX),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
|
||||
BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
|
||||
if (is_visible_key(desc, inum, k)) {
|
||||
if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
|
||||
goto found;
|
||||
@ -289,8 +279,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!slot.path &&
|
||||
!(str_hash_flags & BCH_HASH_SET_MUST_REPLACE))
|
||||
if (!slot.path && !(flags & STR_HASH_must_replace))
|
||||
bch2_trans_copy_iter(&slot, &iter);
|
||||
|
||||
if (k.k->type != KEY_TYPE_hash_whiteout)
|
||||
@ -308,16 +297,16 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
|
||||
found = true;
|
||||
not_found:
|
||||
|
||||
if (!found && (str_hash_flags & BCH_HASH_SET_MUST_REPLACE)) {
|
||||
if (!found && (flags & STR_HASH_must_replace)) {
|
||||
ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
|
||||
} else if (found && (str_hash_flags & BCH_HASH_SET_MUST_CREATE)) {
|
||||
} else if (found && (flags & STR_HASH_must_create)) {
|
||||
ret = -EEXIST;
|
||||
} else {
|
||||
if (!found && slot.path)
|
||||
swap(iter, slot);
|
||||
|
||||
insert->k.p = iter.pos;
|
||||
ret = bch2_trans_update(trans, &iter, insert, update_flags);
|
||||
ret = bch2_trans_update(trans, &iter, insert, flags);
|
||||
}
|
||||
|
||||
goto out;
|
||||
@ -329,14 +318,14 @@ int bch2_hash_set(struct btree_trans *trans,
|
||||
const struct bch_hash_info *info,
|
||||
subvol_inum inum,
|
||||
struct bkey_i *insert,
|
||||
bch_str_hash_flags_t str_hash_flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
insert->k.p.inode = inum.inum;
|
||||
|
||||
u32 snapshot;
|
||||
return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
|
||||
bch2_hash_set_in_snapshot(trans, desc, info, inum,
|
||||
snapshot, insert, str_hash_flags, 0);
|
||||
snapshot, insert, flags);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
@ -344,7 +333,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
|
||||
const struct bch_hash_desc desc,
|
||||
const struct bch_hash_info *info,
|
||||
struct btree_iter *iter,
|
||||
unsigned update_flags)
|
||||
enum btree_iter_update_trigger_flags flags)
|
||||
{
|
||||
struct bkey_i *delete;
|
||||
int ret;
|
||||
@ -362,7 +351,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
|
||||
delete->k.p = iter->pos;
|
||||
delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
|
||||
|
||||
return bch2_trans_update(trans, iter, delete, update_flags);
|
||||
return bch2_trans_update(trans, iter, delete, flags);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
@ -373,7 +362,7 @@ int bch2_hash_delete(struct btree_trans *trans,
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
int ret = bkey_err(k) ?:
|
||||
bch2_hash_delete_at(trans, desc, info, &iter, 0);
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
|
@ -162,7 +162,7 @@ int bch2_check_subvols(struct bch_fs *c)
|
||||
{
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_subvol(trans, &iter, k)));
|
||||
bch_err_fn(c, ret);
|
||||
@ -198,7 +198,7 @@ int bch2_check_subvol_children(struct bch_fs *c)
|
||||
{
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_subvol_child(trans, &iter, k)));
|
||||
bch_err_fn(c, ret);
|
||||
@ -247,7 +247,7 @@ int bch2_subvolume_trigger(struct btree_trans *trans,
|
||||
struct bkey_s_c old, struct bkey_s new,
|
||||
unsigned flags)
|
||||
{
|
||||
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
|
||||
if (flags & BTREE_TRIGGER_transactional) {
|
||||
struct bpos children_pos_old = subvolume_children_pos(old);
|
||||
struct bpos children_pos_new = subvolume_children_pos(new.s_c);
|
||||
|
||||
@ -333,7 +333,7 @@ int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
|
||||
|
||||
subvol = bch2_bkey_get_iter_typed(trans, &iter,
|
||||
BTREE_ID_subvolumes, POS(0, subvolid),
|
||||
BTREE_ITER_CACHED|BTREE_ITER_WITH_UPDATES,
|
||||
BTREE_ITER_cached|BTREE_ITER_with_updates,
|
||||
subvolume);
|
||||
ret = bkey_err(subvol);
|
||||
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
|
||||
@ -383,9 +383,9 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d
|
||||
|
||||
return lockrestart_do(trans,
|
||||
bch2_subvolume_get(trans, subvolid_to_delete, true,
|
||||
BTREE_ITER_CACHED, &s)) ?:
|
||||
BTREE_ITER_cached, &s)) ?:
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
bch2_subvolume_reparent(trans, &iter, k,
|
||||
subvolid_to_delete, le32_to_cpu(s.creation_parent)));
|
||||
@ -404,7 +404,7 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
|
||||
|
||||
subvol = bch2_bkey_get_iter_typed(trans, &iter,
|
||||
BTREE_ID_subvolumes, POS(0, subvolid),
|
||||
BTREE_ITER_CACHED|BTREE_ITER_INTENT,
|
||||
BTREE_ITER_cached|BTREE_ITER_intent,
|
||||
subvolume);
|
||||
ret = bkey_err(subvol);
|
||||
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
|
||||
@ -505,7 +505,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
|
||||
|
||||
n = bch2_bkey_get_mut_typed(trans, &iter,
|
||||
BTREE_ID_subvolumes, POS(0, subvolid),
|
||||
BTREE_ITER_CACHED, subvolume);
|
||||
BTREE_ITER_cached, subvolume);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
|
||||
@ -547,7 +547,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
|
||||
|
||||
src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
|
||||
BTREE_ID_subvolumes, POS(0, src_subvolid),
|
||||
BTREE_ITER_CACHED, subvolume);
|
||||
BTREE_ITER_cached, subvolume);
|
||||
ret = PTR_ERR_OR_ZERO(src_subvol);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
|
||||
|
@ -1600,17 +1600,17 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
* with bch2_do_invalidates() and bch2_do_discards()
*/
ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
BTREE_TRIGGER_NORUN, NULL);
BTREE_TRIGGER_norun, NULL);
bch_err_msg(c, ret, "removing dev alloc info");
return ret;
}

@ -1822,7 +1822,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)

bch2_dev_usage_journal_reserve(c);

ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
bch_err_msg(ca, ret, "marking new superblock");
if (ret)
goto err_late;

@ -1887,7 +1887,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)

ca = bch_dev_locked(c, dev_idx);

ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
if (ret)
goto err;

@ -1980,7 +1980,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (ret)
goto err;

ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
if (ret)
goto err;

@ -274,7 +274,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
continue;

ret = for_each_btree_key(trans, iter, id, POS_MIN,
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_all_snapshots, k, ({
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;

@ -40,7 +40,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
k.k.p.snapshot = U32_MAX;

bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
BTREE_ITER_intent);

ret = commit_do(trans, NULL, NULL, 0,
bch2_btree_iter_traverse(&iter) ?:

@ -81,7 +81,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
k.k.p.snapshot = U32_MAX;

bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
BTREE_ITER_intent);

ret = commit_do(trans, NULL, NULL, 0,
bch2_btree_iter_traverse(&iter) ?:

@ -261,7 +261,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
BTREE_ITER_slots, k, ({
if (i >= nr * 2)
break;

@ -322,7 +322,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
BTREE_ITER_slots, k, ({
if (i == nr)
break;
BUG_ON(bkey_deleted(k.k) != !(i % 16));

@ -452,7 +452,7 @@ static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start,

ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
BTREE_UPDATE_internal_snapshot_node));
bch_err_fn(c, ret);
return ret;
}

@ -671,7 +671,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
int ret = 0;

bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)

@ -714,7 +714,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
BTREE_ITER_slots|BTREE_ITER_intent, k,
NULL, NULL, 0, ({
if (iter.pos.offset >= nr)
break;

@ -737,7 +737,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX),
BTREE_ITER_INTENT, k,
BTREE_ITER_intent, k,
NULL, NULL, 0, ({
struct bkey_i_cookie u;

@ -173,7 +173,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int ret;

ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
if (ret)
return ret;

@ -208,8 +208,8 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,

ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
inum, &xattr->k_i,
(flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
(flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
(flags & XATTR_CREATE ? STR_HASH_must_create : 0)|
(flags & XATTR_REPLACE ? STR_HASH_must_replace : 0));
} else {
struct xattr_search_key search =
X_SEARCH(type, name, strlen(name));
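
For context on the renames throughout this diff: the lowercase flag names (BTREE_ITER_intent, BTREE_TRIGGER_transactional, STR_HASH_must_create, ...) come from a single x-macro flag list, so one list can generate both the enum bits and the strings for a later to_text() printer. The sketch below shows the general pattern only; every name and helper in it is an illustrative stand-in, not the actual bcachefs definition.

#include <stdio.h>

/* Illustrative flag list; expanded three times below. */
#define EXAMPLE_ITER_FLAGS()	\
	x(slots)		\
	x(intent)		\
	x(prefetch)		\
	x(cached)

/* First expansion: one bit number per flag. */
enum example_flag_bits {
#define x(n)	EXAMPLE_ITER_##n##_bit,
	EXAMPLE_ITER_FLAGS()
#undef x
};

/* Second expansion: the flag masks themselves. */
enum example_flags {
#define x(n)	EXAMPLE_ITER_##n = 1U << EXAMPLE_ITER_##n##_bit,
	EXAMPLE_ITER_FLAGS()
#undef x
};

/* Third expansion: flag names, for a to_text()-style printer. */
static const char * const example_flag_strs[] = {
#define x(n)	#n,
	EXAMPLE_ITER_FLAGS()
#undef x
	NULL,
};

/* Print the names of all set flags, one per bit. */
static void example_flags_to_text(unsigned flags)
{
	for (unsigned i = 0; example_flag_strs[i]; i++)
		if (flags & (1U << i))
			printf("%s ", example_flag_strs[i]);
	printf("\n");
}

int main(void)
{
	/* Prints "intent cached" */
	example_flags_to_text(EXAMPLE_ITER_intent | EXAMPLE_ITER_cached);
	return 0;
}

Compiled standalone, this prints "intent cached"; the renamed constants in the hunks above indicate that the real enum also folds the update, trigger, and str_hash flags into the same list.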