linux-next/fs/btrfs/misc.h
Qu Wenruo 149f7d3c2b btrfs: enhance ordered extent double freeing detection
With recent bugs exposed through run_delalloc_range() failures, the
importance of detecting double accounting is obvious.

Currently the way to detect such errors is just to check whether the
btrfs_ordered_extent::bytes_left member underflows.

That works, but it only shows the length we are trying to decrease, which
is not enough to pinpoint the problem.

Here we enhance the situation by:

- Introduce btrfs_ordered_extent::finished_bitmap
  This is a new bitmap indicating which blocks are already finished.
  The bitmap is initialized at alloc_ordered_extent() and released when
  the ordered extent is freed.

- Detect any already finished block inside can_finish_ordered_extent()
  If double accounting is detected, show the full range we are trying to
  finish and dump the bitmap (a rough sketch of this check is given after
  this description).

- Make sure the bitmap is all set when the OE is finished

- Show the full range we are finishing for the existing double accounting
  detection
  This makes the existing detection work with the new run_delalloc_range()
  error messages.

This comes with extra memory and runtime cost: an ordered extent can now
take up to 4K of memory just for the finished_bitmap, and extra bitmap
operations are needed to detect such double accounting.

Thus the double accounting detection is only enabled for
CONFIG_BTRFS_DEBUG builds, i.e. for developers.
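
As a rough, illustrative sketch only (the finished_bitmap field comes from
the description above, but the block index math and the reporting shown
here are assumptions, not the final code), the debug-only check could look
roughly like this, reusing the bitmap helpers from misc.h:

	/* One bit per block of the ordered extent, set up in alloc_ordered_extent(). */
	const unsigned long start_bit = (file_offset - ordered->file_offset) >>
					fs_info->sectorsize_bits;
	const unsigned long nbits = len >> fs_info->sectorsize_bits;

	/* Any bit already set means a block is being finished twice. */
	if (!bitmap_test_range_all_zero(ordered->finished_bitmap, start_bit, nbits))
		btrfs_crit(fs_info, "double accounting detected");

	bitmap_set(ordered->finished_bitmap, start_bit, nbits);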

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2024-12-18 02:41:31 +01:00

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

/*
 * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
 */
#define ENUM_BIT(name)					\
	__ ## name ## _BIT,				\
	name = (1U << __ ## name ## _BIT),		\
	__ ## name ## _SEQ = __ ## name ## _BIT

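/*
 * Illustrative example (not part of the original header): with a hypothetical
 *
 *	enum { ENUM_BIT(FOO), ENUM_BIT(BAR) };
 *
 * FOO becomes 0x1 and BAR becomes 0x2. The trailing __name_SEQ member resets
 * the enum auto-increment back to the bit index, so the next ENUM_BIT() entry
 * automatically gets the following bit.
 */
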
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see comments for
	 * waitqueue_active why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

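/*
 * Illustrative usage sketch (hypothetical names): the waker updates the
 * condition first and then calls cond_wake_up(), relying on the barrier
 * implied by wq_has_sleeper():
 *
 *	foo->done = 1;
 *	cond_wake_up(&foo->wait);
 *
 * while the waiter sleeps in wait_event(foo->wait, foo->done).
 */
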
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code. E.g. one
	 * of such atomic operations (atomic_dec_return, ...), or an
	 * unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}

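/*
 * Illustrative sketch (hypothetical names): here the full barrier is already
 * provided by the value-returning atomic, so the _nomb variant is enough:
 *
 *	if (atomic_dec_and_test(&foo->pending))
 *		cond_wake_up_nomb(&foo->wait);
 */
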
static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}

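/* Illustrative example: mult_perc(1000, 25) == 250, i.e. 25% of 1000. */
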
/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr based rb_tree related structures
 *
 * Any structure that wants to use bytenr as the single search index should
 * have these members at the start of the structure.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

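/*
 * Illustrative usage (hypothetical struct, not part of btrfs): callers embed
 * the simple node as the first member so the found rb_node can be converted
 * back to the containing structure:
 *
 *	struct my_entry {
 *		struct rb_simple_node node;	(must be the first member)
 *		void *payload;
 *	};
 *
 *	struct rb_node *found = rb_simple_search(&root, bytenr);
 *
 *	if (found)
 *		entry = rb_entry(found, struct my_entry, node.rb_node);
 */
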
/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr. If there is no entry at
 * or after @bytenr return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}
			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}
	return ret;
}

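/*
 * Illustrative usage (hypothetical names): iterate all entries at or after a
 * given bytenr by combining rb_simple_search_first() with rb_next():
 *
 *	struct rb_node *node = rb_simple_search_first(&root, bytenr);
 *
 *	while (node) {
 *		entry = rb_entry(node, struct rb_simple_node, rb_node);
 *		node = rb_next(node);
 *	}
 */
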
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

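/*
 * Illustrative usage (hypothetical names): a non-NULL return means an entry
 * with the same bytenr already exists, so the caller must handle the clash:
 *
 *	exist = rb_simple_insert(&root, new->node.bytenr, &new->node.rb_node);
 *
 *	if (exist)
 *		return -EEXIST;
 */
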
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}

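/*
 * Illustrative example: for a bitmap whose lowest byte is 0b00111100,
 * bitmap_test_range_all_set(addr, 2, 4) returns true (bits 2-5 are all set),
 * while bitmap_test_range_all_zero(addr, 2, 4) returns false.
 */
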
/*
 * Count how many bits are set in the bitmap.
 *
 * Similar to bitmap_weight() but accepts a subrange of the bitmap.
 */
static inline unsigned int bitmap_count_set(const unsigned long *addr,
					    unsigned long start,
					    unsigned long nbits)
{
	const unsigned long bitmap_nbits = start + nbits;
	unsigned long cur = start;
	unsigned long total_set = 0;

	while (cur < bitmap_nbits) {
		unsigned long found_zero;
		unsigned long found_set;

		found_zero = find_next_zero_bit(addr, bitmap_nbits, cur);
		total_set += found_zero - cur;
		cur = found_zero;

		if (cur >= bitmap_nbits)
			break;

		found_set = find_next_bit(addr, bitmap_nbits, cur);
		cur = found_set;
	}
	return total_set;
}

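/*
 * Illustrative example: with the lowest byte of the bitmap being 0b00101100,
 * bitmap_count_set(addr, 1, 4) looks at bits 1-4 and returns 2, since only
 * bits 2 and 3 are set inside that range.
 */
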
#endif