bcachefs: Assorted sparse fixes

- endianness fixes
- mark some things static
- fix a few __percpu annotations
- fix silent enum conversions

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-07-06 22:47:42 -04:00
parent 236b68da50
commit 73bd774d28
38 changed files with 115 additions and 118 deletions
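Aside, for readers skimming the diff: sparse checks the kernel's type annotations at compile time, and each bullet in the commit message maps to one class of warning. A minimal sketch of the endianness and static patterns (illustrative only, not code from this commit):

#include <linux/types.h>

/* On-disk fields carry explicit endianness types: */
struct example_ondisk {
	__le64	seq;
};

/*
 * Convert at the boundary: assigning e->seq straight to a u64 draws
 * "restricted __le64 degrades to integer". Marking file-local helpers
 * static likewise silences sparse's "symbol was not declared" warning:
 */
static u64 example_seq(const struct example_ondisk *e)
{
	return le64_to_cpu(e->seq);
}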


@@ -225,6 +225,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct btree_trans trans;
struct btree_iter iter = { NULL };
struct bkey_s_c_xattr xattr;
@@ -237,9 +238,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
bch2_trans_begin(&trans);
ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc,
&hash, inode_inum(inode),
&X_SEARCH(acl_to_xattr_type(type), "", 0),
0);
&hash, inode_inum(inode), &search, 0);
if (ret) {
if (!bch2_err_matches(ret, ENOENT))
acl = ERR_PTR(ret);
@@ -364,6 +363,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
struct posix_acl **new_acl)
{
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
@@ -372,9 +372,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash_info, inum,
&X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
BTREE_ITER_INTENT);
&hash_info, inum, &search, BTREE_ITER_INTENT);
if (ret)
return bch2_err_matches(ret, ENOENT) ? 0 : ret;


@@ -929,7 +929,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
* This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
* extents style btrees, but works on non-extents btrees:
*/
struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
@@ -1000,7 +1000,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
return ca != NULL;
}
struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
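Aside: the comment above states the contract for bch2_get_key_or_hole(); a rough consumer sketch follows. That a hole comes back as a KEY_TYPE_deleted key is inferred from that comment, not taken from the source:

static void example_walk(struct btree_iter *iter, struct bpos end)
{
	struct bkey hole;
	struct bkey_s_c k = bch2_get_key_or_hole(iter, end, &hole);

	if (!bkey_err(k) && k.k->type == KEY_TYPE_deleted) {
		/* synthesized hole: no real keys from iter->pos to k.k->p */
	}
}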


@@ -590,10 +590,10 @@ static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
return div_u64(mem_bytes >> 1, btree_bytes(c));
}
int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
unsigned btree_leaf_mask,
unsigned btree_interior_mask,
struct bbpos start, struct bbpos *end)
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
unsigned btree_leaf_mask,
unsigned btree_interior_mask,
struct bbpos start, struct bbpos *end)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -691,8 +691,8 @@ static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
: bucket;
}
int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
struct bpos start, struct bpos *end)
static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
struct bpos start, struct bpos *end)
{
struct btree_iter alloc_iter;
struct btree_iter bp_iter;


@@ -1371,19 +1371,19 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
__u8 devs[];
__u8 devs[0];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
struct bch_replicas_entry_v0 entries[];
struct bch_replicas_entry_v0 entries[0];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
__u8 devs[];
__u8 devs[0];
} __packed;
#define replicas_entry_bytes(_i) \
@@ -1391,7 +1391,7 @@ struct bch_replicas_entry {
struct bch_sb_field_replicas {
struct bch_sb_field field;
struct bch_replicas_entry entries[];
struct bch_replicas_entry entries[0];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
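Aside: the [] -> [0] changes go against current kernel style, which prefers true flexible array members. The likely motivation (an inference, not stated in the commit) is that these replicas entries get embedded inside larger structures, which C11 forbids for a flexible [] member but the GNU zero-length [0] extension allows, so sparse errors out on the former. A standalone illustration with hypothetical names:

struct entry {
	__u8 nr;
	__u8 devs[0];		/* GNU zero-length array */
};

struct entry_padded {
	struct entry e;		/* ok with devs[0]; with devs[] this is
				 * invalid: a flexible array member may
				 * only end a never-embedded struct */
	__u8 pad[32];
};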


@@ -480,7 +480,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
u->k.p.snapshot = write
? 0 : U32_MAX;
} else {
u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
u64 max_packed = min_packed +
~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);


@@ -599,11 +599,10 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
return (u16) v;
}
__always_inline
static inline void make_bfloat(struct btree *b, struct bset_tree *t,
unsigned j,
struct bkey_packed *min_key,
struct bkey_packed *max_key)
static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
unsigned j,
struct bkey_packed *min_key,
struct bkey_packed *max_key)
{
struct bkey_float *f = bkey_float(b, t, j);
struct bkey_packed *m = tree_to_bkey(b, t, j);


@@ -45,7 +45,11 @@ static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
case KEY_TYPE_btree_ptr:
return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
case KEY_TYPE_btree_ptr_v2:
return bkey_i_to_btree_ptr_v2_c(k)->v.seq;
/*
* The cast/deref is only necessary to avoid sparse endianness
* warnings:
*/
return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
default:
return 0;
}
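Aside: spelled out, the options for reading an __le64 when only a stable bit pattern is needed (a sketch, not from the source):

static u64 example_hash_input(void)
{
	__le64 seq = cpu_to_le64(123);

	/* u64 a = seq;              sparse: mixing __le64 with u64  */
	/* u64 b = le64_to_cpu(seq); correct, but byteswaps on BE    */
	return *((u64 *) &seq);	/* raw bits, no swap: fine for a hash */
}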


@@ -1229,7 +1229,7 @@ static int bch2_gc_done(struct bch_fs *c,
for_each_member_device(ca, c, dev) {
struct bch_dev_usage *dst = ca->usage_base;
struct bch_dev_usage *src = (void *)
bch2_acc_percpu_u64s((void *) ca->usage_gc,
bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
copy_dev_field(buckets_ec, "buckets_ec");
@@ -1245,7 +1245,7 @@ static int bch2_gc_done(struct bch_fs *c,
unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = c->usage_base;
struct bch_fs_usage *src = (void *)
bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
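Aside: bch2_acc_percpu_u64s() treats a per-CPU struct as a flat array of u64 counters and folds every CPU's copy together; the fix just gives the cast its proper __percpu address space so sparse can track it. A simplified sketch of that accumulation pattern (not the actual helper):

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Sum @nr u64 counters from each CPU's copy of @p into @dst: */
static void acc_percpu_u64s(u64 *dst, u64 __percpu *p, unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		u64 *src = per_cpu_ptr(p, cpu);
		unsigned i;

		for (i = 0; i < nr; i++)
			dst[i] += src[i];
	}
}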


@@ -1281,7 +1281,7 @@ struct btree_node_read_all {
unsigned nr;
void *buf[BCH_REPLICAS_MAX];
struct bio *bio[BCH_REPLICAS_MAX];
int err[BCH_REPLICAS_MAX];
blk_status_t err[BCH_REPLICAS_MAX];
};
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
@@ -2230,7 +2230,7 @@ bool bch2_btree_flush_all_writes(struct bch_fs *c)
return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
const char * const bch2_btree_write_types[] = {
static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
BCH_BTREE_WRITE_TYPES()
NULL


@@ -178,7 +178,7 @@ static inline void compat_bformat(unsigned level, enum btree_id btree_id,
f->field_offset[BKEY_FIELD_SNAPSHOT] = write
? 0
: U32_MAX - max_packed;
: cpu_to_le64(U32_MAX - max_packed);
}
}
@@ -200,7 +200,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
struct btree_node *bn)
{
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
write)
bn->min_key = bpos_nosnap_predecessor(bn->min_key);
@@ -217,7 +217,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
bn->max_key.snapshot = U32_MAX;
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
!write)
bn->min_key = bpos_nosnap_successor(bn->min_key);


@@ -1438,7 +1438,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
prt_newline(out);
}
noinline __cold
static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
bool nosort)
{
@@ -1458,7 +1458,7 @@ void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
__bch2_trans_paths_to_text(out, trans, false);
}
noinline __cold
static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
struct printbuf buf = PRINTBUF;
@@ -1867,9 +1867,9 @@ static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
: NULL;
}
struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
struct btree_iter *iter,
struct bpos end_pos)
static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
struct btree_iter *iter,
struct bpos end_pos)
{
struct bkey_i *k;


@@ -283,7 +283,7 @@ static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
}
__always_inline
static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
BUG_ON(err <= 0);
BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
@@ -294,7 +294,7 @@ static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
}
__always_inline
static inline int btree_trans_restart(struct btree_trans *trans, int err)
static int btree_trans_restart(struct btree_trans *trans, int err)
{
btree_trans_restart_nounlock(trans, err);
return -err;


@@ -598,13 +598,6 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
return 0;
}
__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
return btree_path_get_locks(trans, path, true);
}
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)


@@ -93,7 +93,7 @@ static inline void mark_btree_node_locked(struct btree_trans *trans,
unsigned level,
enum six_lock_type type)
{
mark_btree_node_locked_noreset(path, level, type);
mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[level].lock_taken_time = local_clock();
#endif
@@ -246,7 +246,7 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
trans_for_each_path(trans, path)
if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) {
six_lock_increment(&b->lock, want);
six_lock_increment(&b->lock, (enum six_lock_type) want);
return true;
}
@@ -266,7 +266,7 @@ static inline int btree_node_lock(struct btree_trans *trans,
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, type) ||
btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
!(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->level].lock_taken_time = local_clock();


@@ -681,6 +681,11 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
return (1U << type) & BTREE_ID_IS_EXTENTS;
}
static inline bool btree_id_is_extents(enum btree_id btree)
{
return btree_node_type_is_extents((enum btree_node_type) btree);
}
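Aside: C converts between distinct enum types silently; sparse warns about the mix, which is what the explicit casts in btree_locking.h above and this new btree_id_is_extents() wrapper address. A minimal illustration with made-up enums:

enum color { RED, GREEN };
enum fruit { APPLE, PEAR };

static void eat(enum fruit f) { (void) f; }

static void example(void)
{
	enum color c = GREEN;

	/* eat(c);  valid C, but sparse: mixing different enum types */
	eat((enum fruit) c);	/* the cast marks the conversion as intended */
}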
#define BTREE_ID_HAS_SNAPSHOTS \
((1U << BTREE_ID_extents)| \
(1U << BTREE_ID_inodes)| \


@@ -2036,7 +2036,7 @@ static int async_btree_node_rewrite_trans(struct btree_trans *trans,
return ret;
}
void async_btree_node_rewrite_work(struct work_struct *work)
static void async_btree_node_rewrite_work(struct work_struct *work)
{
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);


@@ -29,7 +29,7 @@
* bch2_btree_path_peek_slot() for a cached iterator might return a key in a
* different snapshot:
*/
struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
static struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);


@@ -374,7 +374,7 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
struct bch_replicas_entry *r, s64 sectors,
unsigned journal_seq, bool gc)
{
struct bch_fs_usage __percpu *fs_usage;
struct bch_fs_usage *fs_usage;
int idx, ret = 0;
struct printbuf buf = PRINTBUF;
@@ -1143,7 +1143,7 @@ int bch2_mark_inode(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct bch_fs_usage __percpu *fs_usage;
struct bch_fs_usage *fs_usage;
u64 journal_seq = trans->journal_res.seq;
if (flags & BTREE_TRIGGER_INSERT) {
@@ -1176,7 +1176,7 @@ int bch2_mark_reservation(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage __percpu *fs_usage;
struct bch_fs_usage *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;


@@ -360,7 +360,7 @@ struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
state.type = type;
bch2_checksum_init(&state);
state.seed = a.lo;
state.seed = (u64 __force) a.lo;
BUG_ON(!bch2_checksum_mergeable(type));
@@ -371,7 +371,7 @@ struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
page_address(ZERO_PAGE(0)), b);
b_len -= b;
}
a.lo = bch2_checksum_final(&state);
a.lo = (__le64 __force) bch2_checksum_final(&state);
a.lo ^= b.lo;
a.hi ^= b.hi;
return a;
@@ -597,7 +597,7 @@ int bch2_disable_encryption(struct bch_fs *c)
if (ret)
goto out;
crypt->key.magic = BCH_KEY_MAGIC;
crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
crypt->key.key = key;
SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
@@ -625,7 +625,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
if (ret)
goto err;
key.magic = BCH_KEY_MAGIC;
key.magic = cpu_to_le64(BCH_KEY_MAGIC);
get_random_bytes(&key.key, sizeof(key.key));
if (keyed) {
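Aside: struct bch_csum stores its halves as __le64 while the checksum state works in host-order u64; since only raw bits move between the two, the __force casts tell sparse the reinterpretation is deliberate. The blk_status_t, blk_opf_t and vm_fault_t changes elsewhere in this diff are the same idea applied to other __bitwise types. Sketch:

#include <linux/types.h>

static u64 csum_raw_bits(__le64 lo)
{
	return (u64 __force) lo;	/* reinterpret, don't convert */
}

static __le64 csum_pack_bits(u64 v)
{
	return (__le64 __force) v;
}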


@@ -5,7 +5,7 @@
/* BCH_SB_FIELD_counters */
const char * const bch2_counter_names[] = {
static const char * const bch2_counter_names[] = {
#define x(t, n, ...) (#t),
BCH_PERSISTENT_COUNTERS()
#undef x
@@ -27,7 +27,7 @@ static int bch2_sb_counters_validate(struct bch_sb *sb,
return 0;
};
void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);


@@ -219,7 +219,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
int ret = 0;
if (d.v->d_type == DT_SUBVOL &&
d.v->d_parent_subvol != dir.subvol)
le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
return 1;
if (likely(d.v->d_type != DT_SUBVOL)) {


@@ -385,7 +385,7 @@ static void ec_block_endio(struct bio *bio)
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
unsigned rw, unsigned idx, struct closure *cl)
blk_opf_t opf, unsigned idx, struct closure *cl)
{
struct bch_stripe *v = &buf->key.v;
unsigned offset = 0, bytes = buf->size << 9;
@@ -394,6 +394,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
int rw = op_is_write(opf);
if (ptr_stale(ca, ptr)) {
bch_err_ratelimited(c,
@@ -419,7 +420,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
nr_iovecs,
rw,
opf,
GFP_KERNEL,
&c->ec_bioset),
struct ec_bio, bio);
@@ -1380,11 +1381,12 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
mutex_unlock(&h->lock);
}
struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
unsigned target,
unsigned algo,
unsigned redundancy,
enum bch_watermark watermark)
static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
unsigned target,
unsigned algo,
unsigned redundancy,
enum bch_watermark watermark)
{
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
@@ -1570,7 +1572,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
}
BUG_ON(h->s->existing_stripe.size != h->blocksize);
BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);
BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors));
/*
* Free buckets we initially allocated - they might conflict with


@@ -216,7 +216,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
btree_id_is_extents(btree_id) &&
!bkey_eq(bp.v->min_key, POS_MIN))
bp.v->min_key = write
? bpos_nosnap_predecessor(bp.v->min_key)
@@ -514,13 +514,13 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
set_common_fields(dst->crc32, src);
dst->crc32.csum = *((__le32 *) &src.csum.lo);
dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
break;
case BCH_EXTENT_ENTRY_crc64:
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
dst->crc64.csum_lo = src.csum.lo;
dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
dst->crc64.csum_lo = (u64 __force) src.csum.lo;
dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
break;
case BCH_EXTENT_ENTRY_crc128:
set_common_fields(dst->crc128, src);


@@ -154,11 +154,7 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
common_fields(crc->crc32),
};
*((__le32 *) &ret.csum.lo) = crc->crc32.csum;
memcpy(&ret.csum.lo, &crc->crc32.csum,
sizeof(crc->crc32.csum));
*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
return ret;
}
case BCH_EXTENT_ENTRY_crc64: {
@@ -168,7 +164,7 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
.csum.lo = (__force __le64) crc->crc64.csum_lo,
};
*((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;
*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;
return ret;
}


@@ -458,7 +458,7 @@ enum bch_folio_sector_state {
#undef x
};
const char * const bch2_folio_sector_states[] = {
static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
@@ -997,7 +997,7 @@ vm_fault_t bch2_page_fault(struct vm_fault *vmf)
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
int ret;
vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
@@ -1039,7 +1039,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
int ret;
vm_fault_t ret;
bch2_folio_reservation_init(c, inode, &res);


@@ -1696,8 +1696,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
if (fsck_err_on(ret, c,
"dirent points to missing subvolume %llu",
le64_to_cpu(d.v->d_child_subvol))) {
"dirent points to missing subvolume %u",
le32_to_cpu(d.v->d_child_subvol))) {
ret = __remove_dirent(trans, d.k->p);
goto err;
}
@@ -2238,7 +2238,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
* Backpointer and directory structure checks are sufficient for
* directories, since they can't have hardlinks:
*/
if (S_ISDIR(le16_to_cpu(u.bi_mode)))
if (S_ISDIR(u.bi_mode))
continue;
if (!u.bi_nlink)
@@ -2324,7 +2324,7 @@ static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_ite
BUG_ON(bch2_inode_unpack(k, &u));
if (S_ISDIR(le16_to_cpu(u.bi_mode)))
if (S_ISDIR(u.bi_mode))
return 0;
if (!u.bi_nlink)


@@ -1878,7 +1878,7 @@ void bch2_write(struct closure *cl)
op->end_io(op);
}
const char * const bch2_write_flags[] = {
static const char * const bch2_write_flags[] = {
#define x(f) #f,
BCH_WRITE_FLAGS()
#undef x


@@ -361,7 +361,7 @@ void bch2_journal_pin_drop(struct journal *j,
spin_unlock(&j->lock);
}
enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
if (fn == bch2_btree_node_flush0 ||
fn == bch2_btree_node_flush1)


@@ -201,16 +201,16 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca,
bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
j->d[dst].start = le64_to_cpu(buckets[0]);
j->d[dst].nr = le64_to_cpu(1);
j->d[dst].start = cpu_to_le64(buckets[0]);
j->d[dst].nr = cpu_to_le64(1);
for (i = 1; i < nr; i++) {
if (buckets[i] == buckets[i - 1] + 1) {
le64_add_cpu(&j->d[dst].nr, 1);
} else {
dst++;
j->d[dst].start = le64_to_cpu(buckets[i]);
j->d[dst].nr = le64_to_cpu(1);
j->d[dst].start = cpu_to_le64(buckets[i]);
j->d[dst].nr = cpu_to_le64(1);
}
}


@@ -480,13 +480,6 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
}
if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
mq->c[Q_SPC].timer = qdq->d_spc_timer;
if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
mq->c[Q_SPC].warns = qdq->d_spc_warns;
if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
mq->c[Q_INO].timer = qdq->d_ino_timer;
if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);
mq->c[Q_INO].warns = qdq->d_ino_warns;
mutex_unlock(&q->lock);
}


@@ -308,7 +308,7 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
}
}
struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
struct journal_key *k = iter->keys->d + iter->idx;
@@ -1042,7 +1042,7 @@ static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
root_snapshot.k.p.offset = U32_MAX;
root_snapshot.v.flags = 0;
root_snapshot.v.parent = 0;
root_snapshot.v.subvol = BCACHEFS_ROOT_SUBVOL;
root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
root_snapshot.v.tree = cpu_to_le32(1);
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
@@ -1468,7 +1468,7 @@ int bch2_fs_recovery(struct bch_fs *c)
if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
struct bch_move_stats stats;
bch2_move_stats_init(&stats, "recovery");


@@ -36,8 +36,8 @@ static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
void bch2_replicas_entry_v0_to_text(struct printbuf *out,
struct bch_replicas_entry_v0 *e)
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
struct bch_replicas_entry_v0 *e)
{
unsigned i;
@@ -272,7 +272,7 @@ static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
{
unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
struct bch_fs_usage *dst, *src = (void *)
bch2_acc_percpu_u64s((void *) src_p, src_nr);
bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);
preempt_disable();
dst = this_cpu_ptr(dst_p);


@@ -825,7 +825,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
goto err;
if (s.v->children[0]) {
s_t->v.root_snapshot = cpu_to_le32(s.v->children[0]);
s_t->v.root_snapshot = s.v->children[0];
} else {
s_t->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&s_t->k, 0);
@@ -1328,7 +1328,7 @@ static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
__bch2_subvolume_delete(trans, subvolid));
}
void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs,
snapshot_wait_for_pagecache_and_delete_work);
@@ -1366,7 +1366,7 @@ struct subvolume_unlink_hook {
u32 subvol;
};
int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
struct btree_trans_commit_hook *_h)
{
struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);


@@ -754,11 +754,11 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
goto err;
/* Compat: */
if (sb->version <= bcachefs_metadata_version_inode_v2 &&
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
if (sb->version <= bcachefs_metadata_version_inode_v2 &&
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
@@ -1999,7 +1999,7 @@ static int __init bcachefs_init(void)
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
unsigned bch2_metadata_version = bcachefs_metadata_version_current;
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);
module_exit(bcachefs_exit);


@@ -202,7 +202,7 @@ read_attribute(nocow_lock_table);
#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);
const char * const bch2_write_refs[] = {
static const char * const bch2_write_refs[] = {
#define x(n) #n,
BCH_WRITE_REFS()
#undef x


@@ -444,7 +444,7 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
/* extent unit tests */
u64 test_version;
static u64 test_version;
static int insert_test_extent(struct bch_fs *c,
u64 start, u64 end)


@@ -22,12 +22,13 @@ int bch2_varint_encode(u8 *out, u64 v)
{
unsigned bits = fls64(v|1);
unsigned bytes = DIV_ROUND_UP(bits, 7);
__le64 v_le;
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
v = cpu_to_le64(v);
memcpy(out, &v, bytes);
v_le = cpu_to_le64(v);
memcpy(out, &v_le, bytes);
} else {
*out++ = 255;
bytes = 9;
@@ -57,9 +58,9 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
return -1;
if (likely(bytes < 9)) {
v = 0;
memcpy(&v, in, bytes);
v = le64_to_cpu(v);
__le64 v_le = 0;
memcpy(&v_le, in, bytes);
v = le64_to_cpu(v_le);
v >>= bytes;
} else {
v = get_unaligned_le64(++in);
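Aside: the varint fixes route the memcpy through an explicit __le64 temporary, so the on-wire byte order is stated once instead of being punned through a host u64. A round-trip sketch, assuming both functions return the byte count on success (the decode return value isn't shown in this excerpt):

#include <linux/bug.h>
#include <linux/types.h>

static void varint_roundtrip_example(void)
{
	u8 buf[9];
	u64 v;
	int n = bch2_varint_encode(buf, 1234);
	int m = bch2_varint_decode(buf, buf + n, &v);

	BUG_ON(m != n || v != 1234);	/* assumed contract */
}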


@@ -135,15 +135,14 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
const char *name, void *buffer, size_t size, int type)
{
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_s_c k;
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
inode_inum(inode),
&X_SEARCH(type, name, strlen(name)),
0);
inode_inum(inode), &search, 0);
if (ret)
goto err1;