bcachefs: growable btree_paths
XXX: we're allocating memory with btree locks held - bad. We need to plumb
through an error path so we can do allocate_dropping_locks() - but we're
merging this now because it fixes a transaction path overflow caused by
indirect extent fragmentation, and the resize path is rare.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 0c99e17d3b
parent ff70ad2c8d
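Before the diff, a compressed userspace sketch of the scheme this patch implements may help: all per-transaction tables live in a single allocation, carved up by offset; a bitmap tracks live slots; when allocation finds no free slot, the block is reallocated at twice the size and the tables are copied across. Everything here is illustrative, not bcachefs code: struct trans and path_alloc() are made-up names, calloc()/free() stand in for kzalloc()/kfree_rcu_mightsleep(), and plain stores stand in for rcu_assign_pointer().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct path { int dummy; };		/* stand-in for struct btree_path */

struct trans {
	unsigned	 nr_paths;	/* current capacity; doubles on overflow */
	unsigned long	*allocated;	/* bitmap of live slots, base of the block */
	struct path	*paths;
};

static void paths_realloc(struct trans *t)
{
	unsigned nr = t->nr_paths * 2;

	/* one allocation for bitmap + path table, carved up by offset;
	 * the kernel version uses kzalloc(..., GFP_KERNEL|__GFP_NOFAIL) */
	char *p = calloc(1, BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			    nr * sizeof(struct path));

	unsigned long *allocated = (unsigned long *) p;
	memcpy(allocated, t->allocated,
	       BITS_TO_LONGS(t->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	struct path *paths = (struct path *) p;
	memcpy(paths, t->paths, t->nr_paths * sizeof(struct path));

	/* the kernel publishes the new tables with rcu_assign_pointer(),
	 * defers the free with kfree_rcu_mightsleep(), and skips the free
	 * when the old tables are the inline ones in struct btree_trans;
	 * none of that matters in a single-threaded sketch */
	free(t->allocated);
	t->allocated = allocated;
	t->paths     = paths;
	t->nr_paths  = nr;
}

/* allocate a slot: first clear bit in the bitmap, growing when full;
 * the kernel version gives up instead once nr_paths hits BTREE_ITER_MAX */
static unsigned path_alloc(struct trans *t)
{
	for (;;) {
		for (unsigned i = 0; i < t->nr_paths; i++)
			if (!(t->allocated[i / BITS_PER_LONG] &
			      (1UL << (i % BITS_PER_LONG)))) {
				t->allocated[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
				return i;
			}
		paths_realloc(t);
	}
}

int main(void)
{
	struct trans t = { .nr_paths = 4 };

	t.allocated = calloc(1, BITS_TO_LONGS(t.nr_paths) * sizeof(unsigned long) +
				t.nr_paths * sizeof(struct path));
	t.paths = (struct path *) (t.allocated + BITS_TO_LONGS(t.nr_paths));

	for (int i = 0; i < 9; i++)	/* 9 slots force two doublings: 4 -> 8 -> 16 */
		path_alloc(&t);
	printf("capacity %u\n", t.nr_paths);	/* prints "capacity 16" */

	free(t.allocated);
	return 0;
}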
fs/bcachefs/btree_iter.c

@@ -1209,7 +1209,6 @@ static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_i
 				    bool intent)
 {
 	btree_path_idx_t new = btree_path_alloc(trans, src);
-
 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
 	__btree_path_get(trans->paths + new, intent);
 	return new;
@@ -1515,7 +1514,47 @@ int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
 static noinline void btree_path_overflow(struct btree_trans *trans)
 {
 	bch2_dump_trans_paths_updates(trans);
-	panic("trans path overflow\n");
+	bch_err(trans->c, "trans path overflow");
+}
+
+static noinline void btree_paths_realloc(struct btree_trans *trans)
+{
+	unsigned nr = trans->nr_paths * 2;
+
+	void *p = kzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
+			  sizeof(struct btree_trans_paths) +
+			  nr * sizeof(struct btree_path) +
+			  nr * sizeof(btree_path_idx_t) + 8 +
+			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
+
+	unsigned long *paths_allocated = p;
+	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
+	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
+
+	p += sizeof(struct btree_trans_paths);
+	struct btree_path *paths = p;
+	*trans_paths_nr(paths) = nr;
+	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
+	p += nr * sizeof(struct btree_path);
+
+	btree_path_idx_t *sorted = p;
+	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
+	p += nr * sizeof(btree_path_idx_t) + 8;
+
+	struct btree_insert_entry *updates = p;
+	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
+
+	unsigned long *old = trans->paths_allocated;
+
+	rcu_assign_pointer(trans->paths_allocated, paths_allocated);
+	rcu_assign_pointer(trans->paths, paths);
+	rcu_assign_pointer(trans->sorted, sorted);
+	rcu_assign_pointer(trans->updates, updates);
+
+	trans->nr_paths = nr;
+
+	if (old != trans->_paths_allocated)
+		kfree_rcu_mightsleep(old);
 }
 
 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
@@ -1523,8 +1562,14 @@ static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
 {
 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
 
-	if (unlikely(idx == trans->nr_paths))
-		btree_path_overflow(trans);
+	if (unlikely(idx == trans->nr_paths)) {
+		if (trans->nr_paths == BTREE_ITER_MAX) {
+			btree_path_overflow(trans);
+			return 0;
+		}
+
+		btree_paths_realloc(trans);
+	}
 
 	/*
 	 * Do this before marking the new path as allocated, since it won't be
@@ -2607,21 +2652,18 @@ void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
 static inline void btree_path_list_remove(struct btree_trans *trans,
 					  struct btree_path *path)
 {
-	unsigned i;
-
 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	trans->nr_sorted--;
 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
 				trans->sorted + path->sorted_idx + 1,
-				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
+					     sizeof(u64) / sizeof(btree_path_idx_t)));
 #else
 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
 #endif
-	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
 		trans->paths[trans->sorted[i]].sorted_idx = i;
-
-	path->sorted_idx = U8_MAX;
 }
 
 static inline void btree_path_list_add(struct btree_trans *trans,
@@ -2629,21 +2671,21 @@ static inline void btree_path_list_add(struct btree_trans *trans,
 					 btree_path_idx_t path_idx)
 {
 	struct btree_path *path = trans->paths + path_idx;
-	unsigned i;
 
 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
 			      trans->sorted + path->sorted_idx,
-			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
+					   sizeof(u64) / sizeof(btree_path_idx_t)));
 	trans->nr_sorted++;
 	trans->sorted[path->sorted_idx] = path_idx;
 #else
 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
 #endif
 
-	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
 		trans->paths[trans->sorted[i]].sorted_idx = i;
 
 	btree_trans_verify_sorted_refs(trans);
@@ -2939,7 +2981,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->paths		= trans->_paths;
 	trans->updates		= trans->_updates;
 
-	*trans_paths_nr(trans->paths) = BTREE_ITER_MAX;
+	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
 
 	trans->paths_allocated[0] = 1;
 
@@ -3020,6 +3062,13 @@ void bch2_trans_put(struct btree_trans *trans)
 	if (unlikely(trans->journal_replay_not_finished))
 		bch2_journal_keys_put(c);
 
+	unsigned long *paths_allocated = trans->paths_allocated;
+	trans->paths_allocated	= NULL;
+	trans->paths		= NULL;
+
+	if (paths_allocated != trans->_paths_allocated)
+		kfree_rcu_mightsleep(paths_allocated);
+
 	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
 	else
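A note on the divisor change in the two memmove hunks above: the sorted array's entries grow from u8 to btree_path_idx_t (a u16 elsewhere in this series), so the u64-word-granular move helpers now cover four entries per word rather than eight. The same change explains why the inline _sorted[] slack below shrinks from + 8 entries to + 4 (the same eight bytes), matching the "+ 8" bytes of slack in btree_paths_realloc(). A quick self-check of the arithmetic; the typedef here is an assumption restated from the series, not part of this patch:

#include <assert.h>
#include <stdint.h>

typedef uint16_t btree_path_idx_t;	/* assumed: u16, as defined earlier in the series */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* memmove_u64s_{up,down}_small() move whole u64 words: with u16
	 * entries four pack per word, so n entries need DIV_ROUND_UP(n, 4)
	 * words, vs DIV_ROUND_UP(n, 8) for the old u8 entries */
	assert(sizeof(uint64_t) / sizeof(btree_path_idx_t) == 4);
	assert(DIV_ROUND_UP(7, 4) == 2);	/* e.g. 7 entries -> 2 words */
	return 0;
}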
fs/bcachefs/btree_iter.h

@@ -642,7 +642,7 @@ int __bch2_btree_trans_too_many_iters(struct btree_trans *);
 
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
-	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_MAX - 8)
+	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_INITIAL - 8)
 		return __bch2_btree_trans_too_many_iters(trans);
 
 	return 0;
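Worth noting: the throttle above still keys off BTREE_ITER_INITIAL, not the new, much larger BTREE_ITER_MAX, so btree_trans_too_many_iters() keeps restarting transactions once more than 56 paths are live; growing the table is there for paths that pile up within a single operation, such as the indirect-extent case from the commit message. A userspace stand-in for the new check:

#include <stdbool.h>
#include <stdio.h>

#define BTREE_ITER_INITIAL	64

/* nr_live plays the role of bitmap_weight(trans->paths_allocated,
 * trans->nr_paths) in the real check */
static bool too_many_iters(unsigned nr_live)
{
	return nr_live > BTREE_ITER_INITIAL - 8;
}

int main(void)
{
	printf("%d %d\n", too_many_iters(56), too_many_iters(57));	/* prints "0 1" */
	return 0;
}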
fs/bcachefs/btree_types.h

@@ -358,7 +358,8 @@ struct btree_insert_entry {
 	unsigned long		ip_allocated;
 };
 
-#define BTREE_ITER_MAX		64
+#define BTREE_ITER_INITIAL	64
+#define BTREE_ITER_MAX		(1U << 10)
 
 struct btree_trans_commit_hook;
 typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

@@ -382,7 +383,7 @@ struct btree_trans {
 
 	unsigned long		*paths_allocated;
 	struct btree_path	*paths;
-	u8			*sorted;
+	btree_path_idx_t	*sorted;
 	struct btree_insert_entry *updates;
 
 	void			*mem;

@@ -438,11 +439,11 @@ struct btree_trans {
 	struct list_head	list;
 	struct closure		ref;
 
-	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_MAX)];
+	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
 	struct btree_trans_paths trans_paths;
-	struct btree_path	_paths[BTREE_ITER_MAX];
-	u8			_sorted[BTREE_ITER_MAX + 8];
-	struct btree_insert_entry _updates[BTREE_ITER_MAX];
+	struct btree_path	_paths[BTREE_ITER_INITIAL];
+	btree_path_idx_t	_sorted[BTREE_ITER_INITIAL + 4];
+	struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
 };
 
 static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
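With the defines above, the paths table starts at 64 entries and doubles on demand up to the 1024-entry hard cap: at most four reallocations over a transaction's lifetime (64 -> 128 -> 256 -> 512 -> 1024) before btree_path_alloc() gives up and calls btree_path_overflow(). A one-liner to confirm the bound, using the values from this patch:

#include <stdio.h>

#define BTREE_ITER_INITIAL	64
#define BTREE_ITER_MAX		(1U << 10)

int main(void)
{
	int doublings = 0;

	for (unsigned nr = BTREE_ITER_INITIAL; nr < BTREE_ITER_MAX; nr *= 2)
		doublings++;
	printf("%d doublings\n", doublings);	/* prints "4 doublings" */
	return 0;
}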
fs/bcachefs/extent_update.c

@@ -100,7 +100,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
 	return ret2 ?: ret;
 }
 
-#define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)
+#define EXTENT_ITERS_MAX	(BTREE_ITER_INITIAL / 3)
 
 int bch2_extent_atomic_end(struct btree_trans *trans,
 			   struct btree_iter *iter,
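Finally, the extent-update budget: EXTENT_ITERS_MAX tracks the initial table size rather than the new cap, so extent operations still plan for 64 / 3 = 21 iterators, and overflow beyond that is now absorbed by growing the table instead of panicking. Just the arithmetic, with this patch's values:

#include <assert.h>

#define BTREE_ITER_INITIAL	64
#define EXTENT_ITERS_MAX	(BTREE_ITER_INITIAL / 3)

int main(void)
{
	assert(EXTENT_ITERS_MAX == 21);	/* integer division: 64 / 3 */
	return 0;
}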