mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 10:45:49 +00:00

bcachefs: Refactor various code to not be extent specific

With reflink, various code now has to handle both KEY_TYPE_extent and
KEY_TYPE_reflink_v - so, convert it to be generic across all keys with
pointers.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

parent b1c9358a25
commit 99aaf57000
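The heart of the change: instead of every caller switching on the key type and converting to struct bkey_s_c_extent, one accessor (bch2_bkey_ptrs_c()) hands back the range of pointer entries for any key type that has them, and consumers iterate that range. Below is a freestanding model of the idea — every name and type here is invented for illustration; only the shape mirrors bch2_bkey_ptrs_c()/bch2_bkey_has_device() from this diff:

	#include <stdio.h>

	enum key_type { KEY_EXTENT, KEY_REFLINK_V, KEY_OTHER };

	struct ptr  { int dev; };
	struct key  { enum key_type type; struct ptr ptrs[4]; int nr_ptrs; };
	struct ptrs { const struct ptr *start, *end; };

	/* model of bch2_bkey_ptrs_c(): the type switch lives in one place */
	static struct ptrs key_ptrs(const struct key *k)
	{
		switch (k->type) {
		case KEY_EXTENT:
		case KEY_REFLINK_V:
			return (struct ptrs) { k->ptrs, k->ptrs + k->nr_ptrs };
		default:
			return (struct ptrs) { NULL, NULL };
		}
	}

	/* model of bch2_bkey_has_device(): works for any key with pointers */
	static const struct ptr *key_has_device(const struct key *k, int dev)
	{
		struct ptrs p = key_ptrs(k);

		for (const struct ptr *ptr = p.start; ptr != p.end; ptr++)
			if (ptr->dev == dev)
				return ptr;
		return NULL;
	}

	int main(void)
	{
		struct key k = { KEY_REFLINK_V, { { 0 }, { 2 } }, 2 };

		printf("%s\n", key_has_device(&k, 2) ? "found" : "not found");
		return 0;
	}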
@@ -58,7 +58,7 @@ static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
 	k->u64s = BKEY_U64s + DIV_ROUND_UP(bytes, sizeof(u64));
 }
 
-#define bkey_val_end(_k)	vstruct_idx((_k).v, bkey_val_u64s((_k).k))
+#define bkey_val_end(_k)	((void *) (((u64 *) (_k).v) + bkey_val_u64s((_k).k)))
 
 #define bkey_deleted(_k)	((_k)->type == KEY_TYPE_deleted)
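The new bkey_val_end() is plain pointer arithmetic: the value ends bkey_val_u64s() 64-bit words past the start of the value. A standalone sketch of that arithmetic, with a made-up header size (BKEY_U64s is hypothetical here; the real constant lives in bkey.h):

	#include <stdint.h>
	#include <stdio.h>

	struct bkey   { unsigned u64s; };           /* modeled key header */
	struct bkey_s { struct bkey *k; void *v; }; /* modeled split key  */

	#define BKEY_U64s 5U /* hypothetical header size, in u64 words */

	static unsigned bkey_val_u64s(const struct bkey *k)
	{
		return k->u64s - BKEY_U64s;
	}

	#define bkey_val_end(_k) \
		((void *) (((uint64_t *) (_k).v) + bkey_val_u64s((_k).k)))

	int main(void)
	{
		uint64_t buf[16] = { 0 };
		struct bkey hdr = { .u64s = BKEY_U64s + 3 };
		struct bkey_s k = { .k = &hdr, .v = buf };

		/* the value spans 3 u64s, so the end is buf + 3 */
		printf("%td\n", (uint64_t *) bkey_val_end(k) - buf); /* 3 */
		return 0;
	}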
@@ -162,19 +162,20 @@ static int extent_matches_stripe(struct bch_fs *c,
 				 struct bch_stripe *v,
 				 struct bkey_s_c k)
 {
-	struct bkey_s_c_extent e;
-	const struct bch_extent_ptr *ptr;
-	int idx;
-
-	if (!bkey_extent_is_data(k.k))
-		return -1;
-
-	e = bkey_s_c_to_extent(k);
-
-	extent_for_each_ptr(e, ptr) {
-		idx = ptr_matches_stripe(c, v, ptr);
-		if (idx >= 0)
-			return idx;
+	switch (k.k->type) {
+	case KEY_TYPE_extent: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const struct bch_extent_ptr *ptr;
+		int idx;
+
+		extent_for_each_ptr(e, ptr) {
+			idx = ptr_matches_stripe(c, v, ptr);
+			if (idx >= 0)
+				return idx;
+		}
+		break;
+	}
 	}
 
 	return -1;
@@ -182,19 +183,20 @@ static int extent_matches_stripe(struct bch_fs *c,
 
 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
 {
-	struct bkey_s_c_extent e;
-	const union bch_extent_entry *entry;
+	switch (k.k->type) {
+	case KEY_TYPE_extent: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const union bch_extent_entry *entry;
 
-	if (!bkey_extent_is_data(k.k))
-		return false;
-
-	e = bkey_s_c_to_extent(k);
-
-	extent_for_each_entry(e, entry)
-		if (extent_entry_type(entry) ==
-		    BCH_EXTENT_ENTRY_stripe_ptr &&
-		    entry->stripe_ptr.idx == idx)
-			return true;
+		extent_for_each_entry(e, entry)
+			if (extent_entry_type(entry) ==
+			    BCH_EXTENT_ENTRY_stripe_ptr &&
+			    entry->stripe_ptr.idx == idx)
+				return true;
+		break;
+	}
+	}
 
 	return false;
 }
@@ -249,6 +249,33 @@ void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
 	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
 }
 
+const struct bch_extent_ptr *
+bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const struct bch_extent_ptr *ptr;
+
+	bkey_for_each_ptr(ptrs, ptr)
+		if (ptr->dev == dev)
+			return ptr;
+
+	return NULL;
+}
+
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const struct bch_extent_ptr *ptr;
+
+	bkey_for_each_ptr(ptrs, ptr)
+		if (bch2_dev_in_target(c, ptr->dev, target) &&
+		    (!ptr->cached ||
+		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+			return true;
+
+	return false;
+}
+
 /* extent specific utility code */
 
 const struct bch_extent_ptr *
@@ -279,20 +306,6 @@ bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group
 	return NULL;
 }
 
-const struct bch_extent_ptr *
-bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
-{
-	const struct bch_extent_ptr *ptr;
-
-	extent_for_each_ptr(e, ptr)
-		if (bch2_dev_in_target(c, ptr->dev, target) &&
-		    (!ptr->cached ||
-		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
-			return ptr;
-
-	return NULL;
-}
-
 unsigned bch2_extent_is_compressed(struct bkey_s_c k)
 {
 	unsigned ret = 0;
@@ -313,16 +326,17 @@ unsigned bch2_extent_is_compressed(struct bkey_s_c k)
 	return ret;
 }
 
-bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
-			     struct bch_extent_ptr m, u64 offset)
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+			   struct bch_extent_ptr m, u64 offset)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
 
-	extent_for_each_ptr_decode(e, p, entry)
+	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 		if (p.ptr.dev == m.dev &&
 		    p.ptr.gen == m.gen &&
-		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
+		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
 		    (s64) m.offset - offset)
 			return true;
 
@@ -389,16 +403,17 @@ static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
 		bch2_csum_type_is_encryption(n.csum_type);
 }
 
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
 				 struct bch_extent_crc_unpacked n)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	struct bch_extent_crc_unpacked crc;
 	const union bch_extent_entry *i;
 
 	if (!n.csum_type)
 		return false;
 
-	extent_for_each_crc(e, crc, i)
+	bkey_for_each_crc(k.k, ptrs, crc, i)
 		if (can_narrow_crc(crc, n))
 			return true;
 
@@ -414,9 +429,9 @@ bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
  * currently live (so that readers won't have to bounce) while we've got the
  * checksum we need:
  */
-bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
-			     struct bch_extent_crc_unpacked n)
+bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
 {
+	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
 	struct bch_extent_crc_unpacked u;
 	struct extent_ptr_decoded p;
 	union bch_extent_entry *i;
@@ -424,7 +439,7 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
 
 	/* Find a checksum entry that covers only live data: */
 	if (!n.csum_type) {
-		extent_for_each_crc(extent_i_to_s(e), u, i)
+		bkey_for_each_crc(&k->k, ptrs, u, i)
 			if (!u.compression_type &&
 			    u.csum_type &&
 			    u.live_size == u.uncompressed_size) {
@@ -436,15 +451,15 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
 found:
 	BUG_ON(n.compression_type);
 	BUG_ON(n.offset);
-	BUG_ON(n.live_size != e->k.size);
+	BUG_ON(n.live_size != k->k.size);
 
 restart_narrow_pointers:
-	extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
+	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
 		if (can_narrow_crc(p.crc, n)) {
-			bch2_bkey_drop_ptr(extent_i_to_s(e).s, &i->ptr);
+			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
 			p.ptr.offset += p.crc.offset;
 			p.crc = n;
-			bch2_extent_ptr_decoded_append(e, &p);
+			bch2_extent_ptr_decoded_append(k, &p);
 			ret = true;
 			goto restart_narrow_pointers;
 		}
@@ -1397,9 +1412,12 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
 #undef set_common_fields
 }
 
-static void bch2_extent_crc_init(union bch_extent_crc *crc,
-				 struct bch_extent_crc_unpacked new)
+void bch2_extent_crc_append(struct bkey_i *k,
+			    struct bch_extent_crc_unpacked new)
 {
+	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+	union bch_extent_crc *crc = (void *) ptrs.end;
+
 	if (bch_crc_bytes[new.csum_type] <= 4 &&
 	    new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
 	    new.nonce <= CRC32_NONCE_MAX)
@@ -1416,54 +1434,53 @@ static void bch2_extent_crc_init(union bch_extent_crc *crc,
 		BUG();
 
 	bch2_extent_crc_pack(crc, new);
+
+	k->k.u64s += extent_entry_u64s(ptrs.end);
+
+	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
 }
 
-void bch2_extent_crc_append(struct bkey_i_extent *e,
-			    struct bch_extent_crc_unpacked new)
-{
-	bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
-	__extent_entry_push(e);
-}
-
-static inline void __extent_entry_insert(struct bkey_i_extent *e,
+static inline void __extent_entry_insert(struct bkey_i *k,
 					 union bch_extent_entry *dst,
 					 union bch_extent_entry *new)
 {
-	union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));
+	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
 
 	memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
 			dst, (u64 *) end - (u64 *) dst);
-	e->k.u64s += extent_entry_u64s(new);
+	k->k.u64s += extent_entry_u64s(new);
 	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
 }
 
-void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
 				    struct extent_ptr_decoded *p)
 {
-	struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(&e->k, NULL);
+	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+	struct bch_extent_crc_unpacked crc =
+		bch2_extent_crc_unpack(&k->k, NULL);
 	union bch_extent_entry *pos;
 	unsigned i;
 
 	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
-		pos = e->v.start;
+		pos = ptrs.start;
 		goto found;
 	}
 
-	extent_for_each_crc(extent_i_to_s(e), crc, pos)
+	bkey_for_each_crc(&k->k, ptrs, crc, pos)
 		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
 			pos = extent_entry_next(pos);
			goto found;
 		}
 
-	bch2_extent_crc_append(e, p->crc);
-	pos = extent_entry_last(extent_i_to_s(e));
+	bch2_extent_crc_append(k, p->crc);
+	pos = bkey_val_end(bkey_i_to_s(k));
 found:
 	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
-	__extent_entry_insert(e, pos, to_entry(&p->ptr));
+	__extent_entry_insert(k, pos, to_entry(&p->ptr));
 
 	for (i = 0; i < p->ec_nr; i++) {
 		p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
-		__extent_entry_insert(e, pos, to_entry(&p->ec[i]));
+		__extent_entry_insert(k, pos, to_entry(&p->ec[i]));
 	}
 }
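__extent_entry_insert() above makes room mid-value by shifting the tail of the value up by the new entry's size, bumping k->k.u64s, then copying the entry in. The same mechanics in a standalone model — sizes are in u64 units, as in the kernel helpers, and entry_insert is an invented name:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/* shift everything from dst..end up by new_u64s, then copy in */
	static void entry_insert(uint64_t *val, unsigned *val_u64s,
				 unsigned dst, const uint64_t *new,
				 unsigned new_u64s)
	{
		memmove(val + dst + new_u64s, val + dst,
			(*val_u64s - dst) * sizeof(uint64_t));
		*val_u64s += new_u64s;
		memcpy(val + dst, new, new_u64s * sizeof(uint64_t));
	}

	int main(void)
	{
		uint64_t val[8] = { 1, 2, 3 };
		unsigned val_u64s = 3;
		uint64_t entry[1] = { 9 };

		entry_insert(val, &val_u64s, 1, entry, 1);

		for (unsigned i = 0; i < val_u64s; i++)
			printf("%llu ", (unsigned long long) val[i]);
		printf("\n"); /* prints: 1 9 2 3 */
		return 0;
	}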
@@ -12,7 +12,8 @@ struct btree_insert_entry;
 
 /* extent entries: */
 
-#define extent_entry_last(_e)		bkey_val_end(_e)
+#define extent_entry_last(_e)						\
+	((typeof(&(_e).v->start[0])) bkey_val_end(_e))
 
 #define entry_to_ptr(_entry)						\
 ({									\
@@ -258,6 +259,27 @@ out:									\
 	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
 				   _ptr, _entry)
 
+#define bkey_crc_next(_k, _start, _end, _crc, _iter)			\
+({									\
+	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
+		if (extent_entry_is_crc(_iter)) {			\
+			(_crc) = bch2_extent_crc_unpack(_k,		\
+						entry_to_crc(_iter));	\
+			break;						\
+		}							\
+									\
+	(_iter) < (_end);						\
+})
+
+#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
+	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
+	     (_iter) = (_start);					\
+	     bkey_crc_next(_k, _start, _end, _crc, _iter);		\
+	     (_iter) = extent_entry_next(_iter))
+
+#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
+	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
+
 /* utility code common to all keys with pointers: */
 
 static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
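bkey_crc_next() is a GNU statement expression: it advances the iterator to the next CRC entry, unpacks it into _crc, and evaluates to whether the iterator is still in range, which lets __bkey_for_each_crc() use it directly as a loop condition. A compilable miniature of that protocol, with invented entry types (requires GNU C statement expressions, as the kernel macros do):

	#include <stdio.h>

	enum entry_type { ENTRY_PTR, ENTRY_CRC };
	struct entry { enum entry_type type; int crc; };

	/* advance _iter to the next CRC entry, capture it, and yield
	 * "still in range" as the expression's value */
	#define crc_next(_end, _crc, _iter)			\
	({							\
		for (; (_iter) < (_end); (_iter)++)		\
			if ((_iter)->type == ENTRY_CRC) {	\
				(_crc) = (_iter)->crc;		\
				break;				\
			}					\
		(_iter) < (_end);				\
	})

	#define for_each_crc(_start, _end, _crc, _iter)		\
		for ((_crc) = 0, (_iter) = (_start);		\
		     crc_next(_end, _crc, _iter);		\
		     (_iter)++)

	int main(void)
	{
		struct entry v[] = {
			{ ENTRY_CRC, 10 }, { ENTRY_PTR, 0 },
			{ ENTRY_CRC, 20 }, { ENTRY_PTR, 0 },
		};
		struct entry *iter;
		int crc;

		for_each_crc(v, v + 4, crc, iter)
			printf("crc: %d\n", crc); /* prints 10, then 20 */
		return 0;
	}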
@@ -267,7 +289,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
 		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
 		return (struct bkey_ptrs_c) {
 			to_entry(&e.v->start[0]),
-			to_entry(bkey_val_end(e))
+			to_entry(extent_entry_last(e))
 		};
 	}
 	case KEY_TYPE_extent: {
@@ -337,18 +359,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
 	return ret;
 }
 
-static inline bool bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
-{
-	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
-	const struct bch_extent_ptr *ptr;
-
-	bkey_for_each_ptr(p, ptr)
-		if (ptr->dev == dev)
-			return ptr;
-
-	return NULL;
-}
-
 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
 unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
@@ -359,6 +369,11 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
 			       struct bch_io_failures *,
 			       struct extent_ptr_decoded *);
 
+void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
+bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
+
 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
 			    struct bkey_s_c);
 const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
@@ -424,15 +439,11 @@ void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
 
 const struct bch_extent_ptr *
 bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
 const struct bch_extent_ptr *
 bch2_extent_has_group(struct bch_fs *, struct bkey_s_c_extent, unsigned);
-const struct bch_extent_ptr *
-bch2_extent_has_target(struct bch_fs *, struct bkey_s_c_extent, unsigned);
 
 unsigned bch2_extent_is_compressed(struct bkey_s_c);
 
-bool bch2_extent_matches_ptr(struct bch_fs *, struct bkey_s_c_extent,
-			     struct bch_extent_ptr, u64);
+bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
+			   struct bch_extent_ptr, u64);
 
 static inline bool bkey_extent_is_data(const struct bkey *k)
 {
@@ -456,15 +467,6 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k)
 	}
 }
 
-static inline bool bch2_extent_is_fully_allocated(struct bkey_s_c k)
-{
-	return bkey_extent_is_allocation(k.k) &&
-		!bch2_extent_is_compressed(k);
-}
-
-void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-
 /* Extent entry iteration: */
 
 #define extent_for_each_entry_from(_e, _entry, _start)			\
@@ -480,45 +482,18 @@ void bch2_bkey_drop_device(struct bkey_s, unsigned);
 #define extent_for_each_ptr(_e, _ptr)					\
 	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
 
-#define extent_crc_next(_e, _crc, _iter)				\
-({									\
-	extent_for_each_entry_from(_e, _iter, _iter)			\
-		if (extent_entry_is_crc(_iter)) {			\
-			(_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_iter));\
-			break;						\
-		}							\
-									\
-	(_iter) < extent_entry_last(_e);				\
-})
-
-#define extent_for_each_crc(_e, _crc, _iter)				\
-	for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL),		\
-	     (_iter) = (_e).v->start;					\
-	     extent_crc_next(_e, _crc, _iter);				\
-	     (_iter) = extent_entry_next(_iter))
-
 #define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
 	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
 				   extent_entry_last(_e), _ptr, _entry)
 
-void bch2_extent_crc_append(struct bkey_i_extent *,
+void bch2_extent_crc_append(struct bkey_i *,
 			    struct bch_extent_crc_unpacked);
-void bch2_extent_ptr_decoded_append(struct bkey_i_extent *,
+void bch2_extent_ptr_decoded_append(struct bkey_i *,
 				    struct extent_ptr_decoded *);
 
-static inline void __extent_entry_push(struct bkey_i_extent *e)
-{
-	union bch_extent_entry *entry = extent_entry_last(extent_i_to_s(e));
-
-	EBUG_ON(bkey_val_u64s(&e->k) + extent_entry_u64s(entry) >
-		BKEY_EXTENT_VAL_U64s_MAX);
-
-	e->k.u64s += extent_entry_u64s(entry);
-}
-
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent,
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
 				 struct bch_extent_crc_unpacked);
-bool bch2_extent_narrow_crcs(struct bkey_i_extent *, struct bch_extent_crc_unpacked);
+bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
 
 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
 					   struct bch_extent_ptr *);
@@ -1041,11 +1041,11 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		bool want_full_extent = false;
 
 		if (bkey_extent_is_data(k.k)) {
-			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 			const union bch_extent_entry *i;
 			struct extent_ptr_decoded p;
 
-			extent_for_each_ptr_decode(e, p, i)
+			bkey_for_each_ptr_decode(k.k, ptrs, p, i)
 				want_full_extent |= ((p.crc.csum_type != 0) |
 						     (p.crc.compression_type != 0));
 		}
@@ -1148,15 +1148,15 @@ static int bch2_tmpfile(struct mnt_idmap *idmap,
 }
 
 static int bch2_fill_extent(struct fiemap_extent_info *info,
-			    const struct bkey_i *k, unsigned flags)
+			    struct bkey_s_c k, unsigned flags)
 {
-	if (bkey_extent_is_data(&k->k)) {
-		struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
+	if (bkey_extent_is_data(k.k)) {
+		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 		const union bch_extent_entry *entry;
 		struct extent_ptr_decoded p;
 		int ret;
 
-		extent_for_each_ptr_decode(e, p, entry) {
+		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 			int flags2 = 0;
 			u64 offset = p.ptr.offset;
 
@@ -1166,22 +1166,22 @@ static int bch2_fill_extent(struct fiemap_extent_info *info,
 				offset += p.crc.offset;
 
 			if ((offset & (PAGE_SECTORS - 1)) ||
-			    (e.k->size & (PAGE_SECTORS - 1)))
+			    (k.k->size & (PAGE_SECTORS - 1)))
 				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
 
 			ret = fiemap_fill_next_extent(info,
-						bkey_start_offset(e.k) << 9,
+						bkey_start_offset(k.k) << 9,
 						offset << 9,
-						e.k->size << 9, flags|flags2);
+						k.k->size << 9, flags|flags2);
 			if (ret)
 				return ret;
 		}
 
 		return 0;
-	} else if (k->k.type == KEY_TYPE_reservation) {
+	} else if (k.k->type == KEY_TYPE_reservation) {
 		return fiemap_fill_next_extent(info,
-					       bkey_start_offset(&k->k) << 9,
-					       0, k->k.size << 9,
+					       bkey_start_offset(k.k) << 9,
+					       0, k.k->size << 9,
 					       flags|
 					       FIEMAP_EXTENT_DELALLOC|
 					       FIEMAP_EXTENT_UNWRITTEN);
@@ -1198,7 +1198,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	struct btree_trans trans;
 	struct btree_iter *iter;
 	struct bkey_s_c k;
-	BKEY_PADDED(k) tmp;
+	BKEY_PADDED(k) cur, prev;
 	bool have_extent = false;
 	int ret = 0;
 
@@ -1212,25 +1212,31 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
-			   POS(ei->v.i_ino, start >> 9), 0, k, ret)
+			   POS(ei->v.i_ino, start >> 9), 0, k, ret) {
+		if (bkey_cmp(bkey_start_pos(k.k),
+			     POS(ei->v.i_ino, (start + len) >> 9)) >= 0)
+			break;
+
+		bkey_reassemble(&cur.k, k);
+		k = bkey_i_to_s_c(&cur.k);
+
 		if (bkey_extent_is_data(k.k) ||
 		    k.k->type == KEY_TYPE_reservation) {
-			if (bkey_cmp(bkey_start_pos(k.k),
-				     POS(ei->v.i_ino, (start + len) >> 9)) >= 0)
-				break;
-
 			if (have_extent) {
-				ret = bch2_fill_extent(info, &tmp.k, 0);
+				ret = bch2_fill_extent(info,
+					bkey_i_to_s_c(&prev.k), 0);
 				if (ret)
 					break;
 			}
 
-			bkey_reassemble(&tmp.k, k);
+			bkey_copy(&prev.k, &cur.k);
 			have_extent = true;
 		}
+	}
 
 	if (!ret && have_extent)
-		ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST);
+		ret = bch2_fill_extent(info, bkey_i_to_s_c(&prev.k),
+				       FIEMAP_EXTENT_LAST);
 
 	ret = bch2_trans_exit(&trans) ?: ret;
 	return ret < 0 ? ret : 0;
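The fiemap rework above switches to a one-behind loop: each extent is reported only once its successor is known, so the final call can carry FIEMAP_EXTENT_LAST, and the key is first copied out of the btree (cur, then prev) so it stays valid across the report. The buffering pattern in miniature, with plain integers standing in for extents:

	#include <stdio.h>

	int main(void)
	{
		int keys[] = { 3, 5, 8 }, prev = 0, have_prev = 0;

		for (int i = 0; i < 3; i++) {
			/* report the previous key now that we know
			 * it was not the last one */
			if (have_prev)
				printf("extent %d\n", prev);
			prev = keys[i];
			have_prev = 1;
		}
		/* whatever is still buffered is the last extent */
		if (have_prev)
			printf("extent %d (LAST)\n", prev);
		return 0;
	}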
@@ -431,7 +431,7 @@ static void init_append_extent(struct bch_write_op *op,
 	if (crc.csum_type ||
 	    crc.compression_type ||
 	    crc.nonce)
-		bch2_extent_crc_append(e, crc);
+		bch2_extent_crc_append(&e->k_i, crc);
 
 	bch2_alloc_sectors_append_ptrs(op->c, wp, &e->k_i, crc.compressed_size);
 
@@ -962,17 +962,13 @@ static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
 					  struct bch_io_opts opts,
 					  unsigned flags)
 {
-	if (!bkey_extent_is_data(k.k))
-		return false;
-
 	if (!(flags & BCH_READ_MAY_PROMOTE))
 		return false;
 
 	if (!opts.promote_target)
 		return false;
 
-	if (bch2_extent_has_target(c, bkey_s_c_to_extent(k),
-				   opts.promote_target))
+	if (bch2_bkey_has_target(c, k, opts.promote_target))
 		return false;
 
 	if (bch2_target_congested(c, opts.promote_target)) {
@@ -1230,11 +1226,10 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
 	k = bkey_i_to_s_c(&tmp.k);
 	bch2_trans_unlock(&trans);
 
-	if (!bkey_extent_is_data(k.k) ||
-	    !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
-				     rbio->pick.ptr,
-				     rbio->pos.offset -
-				     rbio->pick.crc.offset)) {
+	if (!bch2_bkey_matches_ptr(c, bkey_i_to_s_c(&tmp.k),
+				   rbio->pick.ptr,
+				   rbio->pos.offset -
+				   rbio->pick.crc.offset)) {
 		/* extent we wanted to read no longer exists: */
 		rbio->hole = true;
 		goto out;
@@ -1370,7 +1365,6 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 	struct btree_trans trans;
 	struct btree_iter *iter;
 	struct bkey_s_c k;
-	struct bkey_i_extent *e;
 	BKEY_PADDED(k) new;
 	struct bch_extent_crc_unpacked new_crc;
 	u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
@@ -1389,34 +1383,30 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 	if (IS_ERR_OR_NULL(k.k))
 		goto out;
 
-	if (!bkey_extent_is_data(k.k))
-		goto out;
-
 	bkey_reassemble(&new.k, k);
-	e = bkey_i_to_extent(&new.k);
+	k = bkey_i_to_s_c(&new.k);
 
-	if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
-				     rbio->pick.ptr, data_offset) ||
-	    bversion_cmp(e->k.version, rbio->version))
+	if (bversion_cmp(k.k->version, rbio->version) ||
+	    !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
 		goto out;
 
 	/* Extent was merged? */
-	if (bkey_start_offset(&e->k) < data_offset ||
-	    e->k.p.offset > data_offset + rbio->pick.crc.uncompressed_size)
+	if (bkey_start_offset(k.k) < data_offset ||
+	    k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
 		goto out;
 
 	if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
 			rbio->pick.crc, NULL, &new_crc,
-			bkey_start_offset(&e->k) - data_offset, e->k.size,
+			bkey_start_offset(k.k) - data_offset, k.k->size,
 			rbio->pick.crc.csum_type)) {
 		bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
 		goto out;
 	}
 
-	if (!bch2_extent_narrow_crcs(e, new_crc))
+	if (!bch2_bkey_narrow_crcs(&new.k, new_crc))
 		goto out;
 
-	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &e->k_i));
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new.k));
 	ret = bch2_trans_commit(&trans, NULL, NULL,
 				BTREE_INSERT_ATOMIC|
 				BTREE_INSERT_NOFAIL|
@@ -1427,15 +1417,6 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 	bch2_trans_exit(&trans);
 }
 
-static bool should_narrow_crcs(struct bkey_s_c k,
-			       struct extent_ptr_decoded *pick,
-			       unsigned flags)
-{
-	return !(flags & BCH_READ_IN_RETRY) &&
-		bkey_extent_is_data(k.k) &&
-		bch2_can_narrow_extent_crcs(bkey_s_c_to_extent(k), pick->crc);
-}
-
 /* Inner part that may run in process context */
 static void __bch2_read_endio(struct work_struct *work)
 {
@@ -1622,7 +1603,8 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	    bio_flagged(&orig->bio, BIO_CHAIN))
 		flags |= BCH_READ_MUST_CLONE;
 
-	narrow_crcs = should_narrow_crcs(k, &pick, flags);
+	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
+		bch2_can_narrow_extent_crcs(k, pick.crc);
 
 	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
 		flags |= BCH_READ_MUST_BOUNCE;
@@ -49,8 +49,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 
 	while ((k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k))) {
-		if (!bkey_extent_is_data(k.k) ||
-		    !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
+		if (!bch2_bkey_has_device(k, dev_idx)) {
 			ret = bch2_mark_bkey_replicas(c, k);
 			if (ret)
 				break;
@@ -82,9 +82,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 			break;
 
 		if (bversion_cmp(k.k->version, new->k.version) ||
-		    !bkey_extent_is_data(k.k) ||
-		    !bch2_extent_matches_ptr(c, bkey_s_c_to_extent(k),
-					     m->ptr, m->offset))
+		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
 			goto nomatch;
 
 		if (m->data_cmd == DATA_REWRITE &&
@@ -116,14 +114,14 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 				continue;
 			}
 
-			bch2_extent_ptr_decoded_append(insert, &p);
+			bch2_extent_ptr_decoded_append(&insert->k_i, &p);
 			did_work = true;
 		}
 
 		if (!did_work)
 			goto nomatch;
 
-		bch2_extent_narrow_crcs(insert,
+		bch2_bkey_narrow_crcs(&insert->k_i,
 					(struct bch_extent_crc_unpacked) { 0 });
 		bch2_extent_normalize(c, extent_i_to_s(insert).s);
 		bch2_extent_mark_replicas_cached(c, extent_i_to_s(insert),
@@ -393,14 +391,15 @@ static int bch2_move_extent(struct bch_fs *c,
 			    struct moving_context *ctxt,
 			    struct write_point_specifier wp,
 			    struct bch_io_opts io_opts,
-			    struct bkey_s_c_extent e,
+			    struct bkey_s_c k,
 			    enum data_cmd data_cmd,
 			    struct data_opts data_opts)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	struct moving_io *io;
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
-	unsigned sectors = e.k->size, pages;
+	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
 	move_ctxt_wait_event(ctxt,
|
||||
|
||||
/* write path might have to decompress data: */
|
||||
extent_for_each_ptr_decode(e, p, entry)
|
||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
|
||||
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
|
||||
|
||||
pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
|
||||
@ -422,8 +421,8 @@ static int bch2_move_extent(struct bch_fs *c,
|
||||
goto err;
|
||||
|
||||
io->write.ctxt = ctxt;
|
||||
io->read_sectors = e.k->size;
|
||||
io->write_sectors = e.k->size;
|
||||
io->read_sectors = k.k->size;
|
||||
io->write_sectors = k.k->size;
|
||||
|
||||
bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
|
||||
bio_set_prio(&io->write.op.wbio.bio,
|
||||
@ -440,18 +439,18 @@ static int bch2_move_extent(struct bch_fs *c,
|
||||
@@ -440,18 +439,18 @@ static int bch2_move_extent(struct bch_fs *c,
 	io->rbio.bio.bi_iter.bi_size = sectors << 9;
 
 	io->rbio.bio.bi_opf = REQ_OP_READ;
-	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(e.k);
+	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io = move_read_endio;
 
 	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
-				      data_cmd, data_opts, e.s_c);
+				      data_cmd, data_opts, k);
 	if (ret)
 		goto err_free_pages;
 
 	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(e.k->size, &ctxt->stats->sectors_moved);
+	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
 
-	trace_move_extent(e.k);
+	trace_move_extent(k.k);
 
 	atomic_add(io->read_sectors, &ctxt->read_sectors);
 	list_add_tail(&io->list, &ctxt->reads);
@@ -461,7 +460,7 @@ static int bch2_move_extent(struct bch_fs *c,
 	 * ctxt when doing wakeup
 	 */
 	closure_get(&ctxt->cl);
-	bch2_read_extent(c, &io->rbio, e.s_c, 0,
+	bch2_read_extent(c, &io->rbio, k, 0,
 			 BCH_READ_NODECODE|
 			 BCH_READ_LAST_FRAGMENT);
 	return 0;
@@ -470,7 +469,7 @@ static int bch2_move_extent(struct bch_fs *c,
 err_free:
 	kfree(io);
 err:
-	trace_move_alloc_fail(e.k);
+	trace_move_alloc_fail(k.k);
 	return ret;
 }
 
@@ -580,8 +579,7 @@ int bch2_move_data(struct bch_fs *c,
 		k = bkey_i_to_s_c(&tmp.k);
 		bch2_trans_unlock(&trans);
 
-		ret2 = bch2_move_extent(c, &ctxt, wp, io_opts,
-					bkey_s_c_to_extent(k),
+		ret2 = bch2_move_extent(c, &ctxt, wp, io_opts, k,
 					data_cmd, data_opts);
 		if (ret2) {
 			if (ret2 == -ENOMEM) {
@@ -69,26 +69,19 @@ static bool __copygc_pred(struct bch_dev *ca,
 			  struct bkey_s_c k)
 {
 	copygc_heap *h = &ca->copygc_heap;
+	const struct bch_extent_ptr *ptr =
+		bch2_bkey_has_device(k, ca->dev_idx);
 
-	switch (k.k->type) {
-	case KEY_TYPE_extent: {
-		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-		const struct bch_extent_ptr *ptr =
-			bch2_extent_has_device(e, ca->dev_idx);
+	if (ptr) {
+		struct copygc_heap_entry search = { .offset = ptr->offset };
 
-		if (ptr) {
-			struct copygc_heap_entry search = { .offset = ptr->offset };
+		ssize_t i = eytzinger0_find_le(h->data, h->used,
+					       sizeof(h->data[0]),
+					       bucket_offset_cmp, &search);
 
-			ssize_t i = eytzinger0_find_le(h->data, h->used,
-						       sizeof(h->data[0]),
-						       bucket_offset_cmp, &search);
-
-			return (i >= 0 &&
-				ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
-				ptr->gen == h->data[i].gen);
-		}
-		break;
-	}
-	}
+		return (i >= 0 &&
+			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
+			ptr->gen == h->data[i].gen);
+	}
 
 	return false;
@@ -38,9 +38,9 @@ void bch2_rebalance_add_key(struct bch_fs *c,
 			    struct bkey_s_c k,
 			    struct bch_io_opts *io_opts)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
-	struct bkey_s_c_extent e;
 
 	if (!bkey_extent_is_data(k.k))
 		return;
@@ -49,9 +49,7 @@ void bch2_rebalance_add_key(struct bch_fs *c,
 	    !io_opts->background_compression)
 		return;
 
-	e = bkey_s_c_to_extent(k);
-
-	extent_for_each_ptr_decode(e, p, entry)
+	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 		if (rebalance_ptr_pred(c, p, io_opts)) {
 			struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 