Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
dm-vdo: change unnamed enums to defines
Signed-off-by: Bruce Johnston <bjohnsto@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
commit 6008d526b0
parent 04530b487b
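The change itself is mechanical: integer constants that had been declared as members of unnamed enums become preprocessor defines (a couple of named or function-local cases are handled individually in the hunks below). A standalone before/after sketch of the pattern, using hypothetical constant names rather than ones taken from the patch:

#include <stdio.h>

/* Old style: an unnamed enum whose only purpose is to name integer constants. */
enum {
        EXAMPLE_OLD_BATCH_SIZE = 128,
};

/* New style: plain defines, with compound values parenthesized. */
#define EXAMPLE_NEW_BATCH_SIZE 128
#define EXAMPLE_NEW_HEADER_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1)

int main(void)
{
        printf("%d %d %d\n", EXAMPLE_OLD_BATCH_SIZE,
               EXAMPLE_NEW_BATCH_SIZE, EXAMPLE_NEW_HEADER_SIZE);
        return 0;
}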
@@ -114,10 +114,8 @@ const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY = {
 	.pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX),
 };
 
-enum {
-	LOG_INTERVAL = 4000,
-	DISPLAY_INTERVAL = 100000,
-};
+#define LOG_INTERVAL 4000
+#define DISPLAY_INTERVAL 100000
 
 /*
  * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
@@ -114,9 +114,7 @@ static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD)
  * them are awakened.
  */
 
-enum {
-	DATA_VIO_RELEASE_BATCH_SIZE = 128,
-};
+#define DATA_VIO_RELEASE_BATCH_SIZE 128
 
 static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1;
 static const u32 COMPRESSION_STATUS_MASK = 0xff;
@@ -1044,8 +1042,8 @@ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
 	 * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the
 	 * second clock tick). These numbers were picked based on experiments with lab machines.
 	 */
-	enum { ELEMENTS_PER_BATCH = 35 };
-	enum { SLEEP_FOR_SYSLOG = 4000 };
+	static const int ELEMENTS_PER_BATCH = 35;
+	static const int SLEEP_FOR_SYSLOG = 4000;
 
 	if (pool == NULL)
 		return;
@@ -154,11 +154,9 @@ struct uds_attribute {
 	const char *(*show_string)(struct hash_zones *hash_zones);
 };
 
-enum timer_state {
-	DEDUPE_QUERY_TIMER_IDLE,
-	DEDUPE_QUERY_TIMER_RUNNING,
-	DEDUPE_QUERY_TIMER_FIRED,
-};
+#define DEDUPE_QUERY_TIMER_IDLE 0
+#define DEDUPE_QUERY_TIMER_RUNNING 1
+#define DEDUPE_QUERY_TIMER_FIRED 2
 
 enum dedupe_context_state {
 	DEDUPE_CONTEXT_IDLE,
@@ -185,11 +183,9 @@ static const char *SUSPENDED = "suspended";
 static const char *UNKNOWN = "unknown";
 
 /* Version 2 uses the kernel space UDS index and is limited to 16 bytes */
-enum {
-	UDS_ADVICE_VERSION = 2,
-	/* version byte + state byte + 64-bit little-endian PBN */
-	UDS_ADVICE_SIZE = 1 + 1 + sizeof(u64),
-};
+#define UDS_ADVICE_VERSION 2
+/* version byte + state byte + 64-bit little-endian PBN */
+#define UDS_ADVICE_SIZE (1 + 1 + sizeof(u64))
 
 enum hash_lock_state {
 	/* State for locks that are not in use or are being initialized. */
@@ -279,9 +275,7 @@ struct hash_lock {
 	struct vdo_wait_queue waiters;
 };
 
-enum {
-	LOCK_POOL_CAPACITY = MAXIMUM_VDO_USER_VIOS,
-};
+#define LOCK_POOL_CAPACITY MAXIMUM_VDO_USER_VIOS
 
 struct hash_zones {
 	struct action_manager *manager;
@@ -42,7 +42,7 @@
 
 #define CURRENT_VERSION "8.3.0.65"
 
-enum {
+enum admin_phases {
 	GROW_LOGICAL_PHASE_START,
 	GROW_LOGICAL_PHASE_GROW_BLOCK_MAP,
 	GROW_LOGICAL_PHASE_END,
@@ -142,10 +142,8 @@ static const char * const ADMIN_PHASE_NAMES[] = {
 	"SUSPEND_PHASE_END",
 };
 
-enum {
-	/* If we bump this, update the arrays below */
-	TABLE_VERSION = 4,
-};
+/* If we bump this, update the arrays below */
+#define TABLE_VERSION 4
 
 /* arrays for handling different table versions */
 static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 };
@@ -159,17 +157,15 @@ static const u8 POOL_NAME_ARG_INDEX[] = { 8, 10, 8 };
  * need to scan 16 words, so it's not likely to be a big deal compared to other resource usage.
  */
 
-enum {
-	/*
-	 * This minimum size for the bit array creates a numbering space of 0-999, which allows
-	 * successive starts of the same volume to have different instance numbers in any
-	 * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that
-	 * the ephemeral stats have reset to zero.
-	 */
-	BIT_COUNT_MINIMUM = 1000,
-	/** Grow the bit array by this many bits when needed */
-	BIT_COUNT_INCREMENT = 100,
-};
+/*
+ * This minimum size for the bit array creates a numbering space of 0-999, which allows
+ * successive starts of the same volume to have different instance numbers in any
+ * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that
+ * the ephemeral stats have reset to zero.
+ */
+#define BIT_COUNT_MINIMUM 1000
+/* Grow the bit array by this many bits when needed */
+#define BIT_COUNT_INCREMENT 100
 
 struct instance_tracker {
 	unsigned int bit_count;
@@ -41,10 +41,10 @@ enum dump_option_flags {
 	FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT)
 };
 
-enum {
-	FLAGS_ALL_POOLS = (FLAG_SHOW_VIO_POOL),
-	DEFAULT_DUMP_FLAGS = (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS)
-};
+#define FLAGS_ALL_POOLS (FLAG_SHOW_VIO_POOL)
+#define DEFAULT_DUMP_FLAGS (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS)
+/* Another static buffer... log10(256) = 2.408+, round up: */
+#define DIGITS_PER_U64 (1 + sizeof(u64) * 2409 / 1000)
 
 static inline bool is_arg_string(const char *arg, const char *this_option)
 {
@@ -222,9 +222,6 @@ void dump_data_vio(void *data)
  * one does run, the log output will be garbled anyway.
  */
 static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN];
-/* Another static buffer... log10(256) = 2.408+, round up: */
-enum { DIGITS_PER_U64 = 1 + sizeof(u64) * 2409 / 1000 };
-
 static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64];
 static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64];
 static char flags_dump_buffer[8];
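For illustration only (not part of the patch): a quick standalone check of the DIGITS_PER_U64 arithmetic noted in the comment above, using the standard uint64_t in place of the kernel's u64.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DIGITS_PER_U64 (1 + sizeof(uint64_t) * 2409 / 1000)

int main(void)
{
        /* sizeof(uint64_t) * 2409 / 1000 = 8 * 2409 / 1000 = 19, so the macro
         * evaluates to 20, matching the 20 decimal digits of UINT64_MAX. */
        printf("DIGITS_PER_U64 = %zu\n", (size_t)DIGITS_PER_U64);
        printf("UINT64_MAX     = %" PRIu64 "\n", UINT64_MAX);
        return 0;
}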
@@ -55,9 +55,7 @@ static const struct header GEOMETRY_BLOCK_HEADER_4_0 = {
 
 const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001";
 
-enum {
-	PAGE_HEADER_4_1_SIZE = 8 + 8 + 8 + 1 + 1 + 1 + 1,
-};
+#define PAGE_HEADER_4_1_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1)
 
 static const struct version_number BLOCK_MAP_4_1 = {
 	.major_version = 4,
@@ -79,9 +79,7 @@ struct error_block {
 	const struct error_info *infos;
 };
 
-enum {
-	MAX_ERROR_BLOCKS = 6,
-};
+#define MAX_ERROR_BLOCKS 6
 
 static struct {
 	int allocated;
@@ -15,12 +15,10 @@ static const u8 INDEX_CONFIG_MAGIC[] = "ALBIC";
 static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02";
 static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02";
 
-enum {
-	DEFAULT_VOLUME_READ_THREADS = 2,
-	MAX_VOLUME_READ_THREADS = 16,
-	INDEX_CONFIG_MAGIC_LENGTH = sizeof(INDEX_CONFIG_MAGIC) - 1,
-	INDEX_CONFIG_VERSION_LENGTH = sizeof(INDEX_CONFIG_VERSION_6_02) - 1,
-};
+#define DEFAULT_VOLUME_READ_THREADS 2
+#define MAX_VOLUME_READ_THREADS 16
+#define INDEX_CONFIG_MAGIC_LENGTH (sizeof(INDEX_CONFIG_MAGIC) - 1)
+#define INDEX_CONFIG_VERSION_LENGTH ((int)(sizeof(INDEX_CONFIG_VERSION_6_02) - 1))
 
 static bool is_version(const u8 *version, u8 *buffer)
 {
@@ -70,17 +70,13 @@
  * This is the largest field size supported by get_field() and set_field(). Any field that is
  * larger is not guaranteed to fit in a single byte-aligned u32.
  */
-enum {
-	MAX_FIELD_BITS = (sizeof(u32) - 1) * BITS_PER_BYTE + 1,
-};
+#define MAX_FIELD_BITS ((sizeof(u32) - 1) * BITS_PER_BYTE + 1)
 
 /*
  * This is the largest field size supported by get_big_field() and set_big_field(). Any field that
 * is larger is not guaranteed to fit in a single byte-aligned u64.
  */
-enum {
-	MAX_BIG_FIELD_BITS = (sizeof(u64) - 1) * BITS_PER_BYTE + 1,
-};
+#define MAX_BIG_FIELD_BITS ((sizeof(u64) - 1) * BITS_PER_BYTE + 1)
 
 /*
  * This is the number of guard bytes needed at the end of the memory byte array when using the bit
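For illustration only: a standalone re-derivation of the MAX_FIELD_BITS bound, using uint32_t in place of the kernel's u32. A field of (sizeof(u32) - 1) * 8 + 1 = 25 bits starting at any bit offset within a byte spans at most four bytes, so it always fits in one byte-aligned u32 access.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define MAX_FIELD_BITS ((sizeof(uint32_t) - 1) * BITS_PER_BYTE + 1)

int main(void)
{
        unsigned int offset;

        for (offset = 0; offset < BITS_PER_BYTE; offset++) {
                /* Bytes touched by a MAX_FIELD_BITS-bit field starting at this bit offset. */
                size_t bytes = (offset + MAX_FIELD_BITS + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

                printf("bit offset %u: %zu bytes (limit %zu)\n",
                       offset, bytes, sizeof(uint32_t));
        }
        return 0;
}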
@@ -88,45 +84,33 @@ enum {
  * bytes beyond the end of the desired field. The definition is written to make it clear how this
  * value is derived.
  */
-enum {
-	POST_FIELD_GUARD_BYTES = sizeof(u64) - 1,
-};
+#define POST_FIELD_GUARD_BYTES (sizeof(u64) - 1)
 
 /* The number of guard bits that are needed in the tail guard list */
-enum {
-	GUARD_BITS = POST_FIELD_GUARD_BYTES * BITS_PER_BYTE
-};
+#define GUARD_BITS (POST_FIELD_GUARD_BYTES * BITS_PER_BYTE)
 
 /*
  * The maximum size of a single delta list in bytes. We count guard bytes in this value because a
  * buffer of this size can be used with move_bits().
  */
-enum {
-	DELTA_LIST_MAX_BYTE_COUNT =
-		((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
-};
+#define DELTA_LIST_MAX_BYTE_COUNT \
+	((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
 
 /* The number of extra bytes and bits needed to store a collision entry */
-enum {
-	COLLISION_BYTES = UDS_RECORD_NAME_SIZE,
-	COLLISION_BITS = COLLISION_BYTES * BITS_PER_BYTE
-};
+#define COLLISION_BYTES UDS_RECORD_NAME_SIZE
+#define COLLISION_BITS (COLLISION_BYTES * BITS_PER_BYTE)
 
 /*
  * Immutable delta lists are packed into pages containing a header that encodes the delta list
  * information into 19 bits per list (64KB bit offset).
  */
-
-enum { IMMUTABLE_HEADER_SIZE = 19 };
+#define IMMUTABLE_HEADER_SIZE 19
 
 /*
  * Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a
  * number to increment when the format of the data changes.
  */
-
-enum {
-	MAGIC_SIZE = 8,
-};
+#define MAGIC_SIZE 8
 
 static const char DELTA_INDEX_MAGIC[] = "DI-00002";
 
@@ -216,9 +200,7 @@ static void rebalance_delta_zone(const struct delta_zone *delta_zone, u32 first,
 static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size)
 {
 	/* Round up so that each zone is a multiple of 64K in size. */
-	enum {
-		ALLOC_BOUNDARY = 64 * 1024,
-	};
+	size_t ALLOC_BOUNDARY = 64 * 1024;
 
 	return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;
 }
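For illustration only: the rounding idiom in get_zone_memory_size() above, exercised in a standalone program. (x + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY rounds x up to the next multiple of a power-of-two boundary; the sample sizes below are made up.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t ALLOC_BOUNDARY = 64 * 1024;
        size_t sizes[] = { 1, 65536, 65537, 100000 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                /* Round up to the next 64K boundary; -ALLOC_BOUNDARY is the mask ~(64K - 1). */
                size_t rounded = (sizes[i] + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;

                printf("%zu -> %zu\n", sizes[i], rounded);
        }
        return 0;
}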
@@ -54,11 +54,9 @@
  * Each save also has a unique nonce.
  */
 
-enum {
-	MAGIC_SIZE = 32,
-	NONCE_INFO_SIZE = 32,
-	MAX_SAVES = 2,
-};
+#define MAGIC_SIZE 32
+#define NONCE_INFO_SIZE 32
+#define MAX_SAVES 2
 
 enum region_kind {
 	RL_KIND_EMPTY = 0,
@@ -82,9 +80,7 @@ enum region_type {
 	RH_TYPE_UNSAVED = 4,
 };
 
-enum {
-	RL_SOLE_INSTANCE = 65535,
-};
+#define RL_SOLE_INSTANCE 65535
 
 /*
  * Super block version 2 is the first released version.
@@ -98,11 +94,9 @@ enum {
  * order to make room to prepend LVM metadata to a volume originally created without lvm. This
  * allows the index to retain most its deduplication records.
  */
-enum {
-	SUPER_VERSION_MINIMUM = 3,
-	SUPER_VERSION_CURRENT = 3,
-	SUPER_VERSION_MAXIMUM = 7,
-};
+#define SUPER_VERSION_MINIMUM 3
+#define SUPER_VERSION_CURRENT 3
+#define SUPER_VERSION_MAXIMUM 7
 
 static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
 static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
@@ -25,9 +25,7 @@
 
 static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02";
 
-enum {
-	PAGE_MAP_MAGIC_LENGTH = sizeof(PAGE_MAP_MAGIC) - 1,
-};
+#define PAGE_MAP_MAGIC_LENGTH (sizeof(PAGE_MAP_MAGIC) - 1)
 
 static inline u32 get_entry_count(const struct index_geometry *geometry)
 {
@@ -37,7 +37,7 @@ struct buffered_reader {
 	u8 *end;
 };
 
-enum { MAX_READ_AHEAD_BLOCKS = 4 };
+#define MAX_READ_AHEAD_BLOCKS 4
 
 /*
  * The buffered writer allows efficient I/O by buffering writes and committing page-sized segments
@@ -46,11 +46,9 @@
 static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC";
 static const u8 OPEN_CHAPTER_VERSION[] = "02.00";
 
-enum {
-	OPEN_CHAPTER_MAGIC_LENGTH = sizeof(OPEN_CHAPTER_MAGIC) - 1,
-	OPEN_CHAPTER_VERSION_LENGTH = sizeof(OPEN_CHAPTER_VERSION) - 1,
-	LOAD_RATIO = 2,
-};
+#define OPEN_CHAPTER_MAGIC_LENGTH (sizeof(OPEN_CHAPTER_MAGIC) - 1)
+#define OPEN_CHAPTER_VERSION_LENGTH (sizeof(OPEN_CHAPTER_VERSION) - 1)
+#define LOAD_RATIO 2
 
 static inline size_t records_size(const struct open_chapter_zone *open_chapter)
 {
@@ -17,10 +17,8 @@
  * keys to be sorted.
  */
 
-enum {
-	/* Piles smaller than this are handled with a simple insertion sort. */
-	INSERTION_SORT_THRESHOLD = 12,
-};
+/* Piles smaller than this are handled with a simple insertion sort. */
+#define INSERTION_SORT_THRESHOLD 12
 
 /* Sort keys are pointers to immutable fixed-length arrays of bytes. */
 typedef const u8 *sort_key_t;
@@ -77,10 +77,8 @@
  * considered to be a member of the cache for uds_sparse_cache_contains().
  */
 
-enum {
-	SKIP_SEARCH_THRESHOLD = 20000,
-	ZONE_ZERO = 0,
-};
+#define SKIP_SEARCH_THRESHOLD 20000
+#define ZONE_ZERO 0
 
 /*
  * These counters are essentially fields of the struct cached_chapter_index, but are segregated
@@ -94,7 +94,8 @@ struct chapter_range {
 	u32 chapter_count;
 };
 
-enum { MAGIC_SIZE = 8 };
+#define MAGIC_SIZE 8
+
 static const char MAGIC_START_5[] = "MI5-0005";
 
 struct sub_index_data {
@@ -193,10 +194,11 @@ unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index,
 	return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name);
 }
 
+#define DELTA_LIST_SIZE 256
+
 static int compute_volume_sub_index_parameters(const struct uds_configuration *config,
 					       struct sub_index_parameters *params)
 {
-	enum { DELTA_LIST_SIZE = 256 };
 	u64 entries_in_volume_index, address_span;
 	u32 chapters_in_volume_index, invalid_chapters;
 	u32 rounded_chapters;
@@ -60,13 +60,11 @@
  * in-memory volume index.
  */
 
-enum {
-	/* The maximum allowable number of contiguous bad chapters */
-	MAX_BAD_CHAPTERS = 100,
-	VOLUME_CACHE_MAX_ENTRIES = (U16_MAX >> 1),
-	VOLUME_CACHE_QUEUED_FLAG = (1 << 15),
-	VOLUME_CACHE_MAX_QUEUED_READS = 4096,
-};
+/* The maximum allowable number of contiguous bad chapters */
+#define MAX_BAD_CHAPTERS 100
+#define VOLUME_CACHE_MAX_ENTRIES (U16_MAX >> 1)
+#define VOLUME_CACHE_QUEUED_FLAG (1 << 15)
+#define VOLUME_CACHE_MAX_QUEUED_READS 4096
 
 static const u64 BAD_CHAPTER = U64_MAX;
 
@@ -56,13 +56,11 @@
 #include "numeric.h"
 #include "permassert.h"
 
-enum {
-	DEFAULT_CAPACITY = 16, /* the number of neighborhoods in a new table */
-	NEIGHBORHOOD = 255, /* the number of buckets in each neighborhood */
-	MAX_PROBES = 1024, /* limit on the number of probes for a free bucket */
-	NULL_HOP_OFFSET = 0, /* the hop offset value terminating the hop list */
-	DEFAULT_LOAD = 75 /* a compromise between memory use and performance */
-};
+#define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */
+#define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */
+#define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */
+#define NULL_HOP_OFFSET 0 /* the hop offset value terminating the hop list */
+#define DEFAULT_LOAD 75 /* a compromise between memory use and performance */
 
 /**
  * struct bucket - hash bucket
@@ -21,9 +21,7 @@
 #include "physical-zone.h"
 #include "vdo.h"
 
-enum {
-	ALLOCATIONS_PER_ZONE = 128,
-};
+#define ALLOCATIONS_PER_ZONE 128
 
 /**
  * as_logical_zone() - Convert a generic vdo_completion to a logical_zone.
@@ -30,9 +30,7 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = {
 	.minor_version = 0,
 };
 
-enum {
-	COMPRESSED_BLOCK_1_0_SIZE = 4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS),
-};
+#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
 
 /**
  * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
@@ -23,10 +23,8 @@
 #include "status-codes.h"
 #include "vdo.h"
 
-enum {
-	/* Each user data_vio needs a PBN read lock and write lock. */
-	LOCK_POOL_CAPACITY = 2 * MAXIMUM_VDO_USER_VIOS,
-};
+/* Each user data_vio needs a PBN read lock and write lock. */
+#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)
 
 struct pbn_lock_implementation {
 	enum pbn_lock_type type;
@@ -14,9 +14,7 @@
 #include "status-codes.h"
 
 /* We use a single 64-bit search vector, so the maximum priority is 63 */
-enum {
-	MAX_PRIORITY = 63
-};
+#define MAX_PRIORITY 63
 
 /*
  * All the entries with the same priority are queued in a circular list in a bucket for that
@@ -26,15 +26,13 @@
 
 static const u64 RECOVERY_COUNT_MASK = 0xff;
 
-enum {
-	/*
-	 * The number of reserved blocks must be large enough to prevent a new recovery journal
-	 * block write from overwriting a block which appears to still be a valid head block of the
-	 * journal. Currently, that means reserving enough space for all 2048 data_vios.
-	 */
-	RECOVERY_JOURNAL_RESERVED_BLOCKS =
-		(MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2,
-};
+/*
+ * The number of reserved blocks must be large enough to prevent a new recovery journal
+ * block write from overwriting a block which appears to still be a valid head block of the
+ * journal. Currently, that means reserving enough space for all 2048 data_vios.
+ */
+#define RECOVERY_JOURNAL_RESERVED_BLOCKS \
+	((MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2)
 
 /**
  * DOC: Lock Counters.
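For illustration only: the shape of the RECOVERY_JOURNAL_RESERVED_BLOCKS calculation. Only MAXIMUM_VDO_USER_VIOS (2048) is taken from the comment above; the entries-per-block figure below is a made-up stand-in, not the kernel's RECOVERY_JOURNAL_ENTRIES_PER_BLOCK.

#include <stdio.h>

#define MAXIMUM_VDO_USER_VIOS 2048
#define EXAMPLE_ENTRIES_PER_BLOCK 200 /* invented value for the sketch */

#define EXAMPLE_RESERVED_BLOCKS \
        ((MAXIMUM_VDO_USER_VIOS / EXAMPLE_ENTRIES_PER_BLOCK) + 2)

int main(void)
{
        /* One journal entry per data_vio, plus a two-block margin (see comment above). */
        printf("reserved blocks = %d\n", EXAMPLE_RESERVED_BLOCKS);
        return 0;
}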
@@ -27,11 +27,9 @@ struct thread {
 	struct completion thread_done;
 };
 
-enum {
-	ONCE_NOT_DONE = 0,
-	ONCE_IN_PROGRESS = 1,
-	ONCE_COMPLETE = 2,
-};
+#define ONCE_NOT_DONE 0
+#define ONCE_IN_PROGRESS 1
+#define ONCE_COMPLETE 2
 
 /* Run a function once only, and record that fact in the atomic value. */
 void vdo_perform_once(atomic_t *once, void (*function)(void))
@@ -60,7 +60,7 @@
 #include "status-codes.h"
 #include "vio.h"
 
-enum { PARANOID_THREAD_CONSISTENCY_CHECKS = 0 };
+#define PARANOID_THREAD_CONSISTENCY_CHECKS 0
 
 struct sync_completion {
 	struct vdo_completion vdo_completion;