2018-06-05 19:42:14 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
2005-11-02 14:58:39 +11:00
|
|
|
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
|
|
|
|
* All Rights Reserved.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
|
|
|
#include "xfs.h"
|
2005-11-02 14:38:42 +11:00
|
|
|
#include "xfs_fs.h"
|
2013-10-29 22:11:58 +11:00
|
|
|
#include "xfs_shared.h"
|
2013-10-23 10:51:50 +11:00
|
|
|
#include "xfs_format.h"
|
2013-10-23 10:50:10 +11:00
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2005-11-02 14:38:42 +11:00
|
|
|
#include "xfs_bit.h"
|
2005-04-16 15:20:36 -07:00
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_btree.h"
|
2020-03-11 11:01:04 -07:00
|
|
|
#include "xfs_btree_staging.h"
|
2005-04-16 15:20:36 -07:00
|
|
|
#include "xfs_ialloc.h"
|
2013-10-23 10:51:50 +11:00
|
|
|
#include "xfs_ialloc_btree.h"
|
2005-04-16 15:20:36 -07:00
|
|
|
#include "xfs_alloc.h"
|
|
|
|
#include "xfs_error.h"
|
2024-02-22 12:39:47 -08:00
|
|
|
#include "xfs_health.h"
|
2012-11-12 22:54:08 +11:00
|
|
|
#include "xfs_trace.h"
|
2013-10-23 10:50:10 +11:00
|
|
|
#include "xfs_trans.h"
|
2016-08-03 11:33:42 +10:00
|
|
|
#include "xfs_rmap.h"
|
2021-06-02 10:48:24 +10:00
|
|
|
#include "xfs_ag.h"
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2021-09-27 14:26:19 -07:00
|
|
|
static struct kmem_cache *xfs_inobt_cur_cache;
|
2021-09-23 12:21:37 -07:00
|
|
|
|
2008-10-30 16:58:01 +11:00
|
|
|
STATIC int
|
|
|
|
xfs_inobt_get_minrecs(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
int level)
|
|
|
|
{
|
2019-06-05 11:19:34 -07:00
|
|
|
return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
|
2008-10-30 16:58:01 +11:00
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-10-30 16:53:59 +11:00
|
|
|
/* Duplicate an inobt cursor: same perag, transaction and AGI buffer. */
STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
			cur->bc_ag.agbp);
}
|
|
|
|
|
|
|
|
/* Duplicate a finobt cursor: same perag, transaction and AGI buffer. */
STATIC struct xfs_btree_cur *
xfs_finobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_finobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
			cur->bc_ag.agbp);
}
|
|
|
|
|
2008-10-30 16:57:16 +11:00
|
|
|
/*
 * Point the AGI at a new inobt root block and log the change.  @inc is
 * the change in tree height caused by the root update.
 */
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*nptr,
	int				inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	/* AGI root pointers use the short (AG-relative) ptr format. */
	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}
|
|
|
|
|
2014-04-24 16:00:52 +10:00
|
|
|
/*
 * Point the AGI at a new finobt root block and log the change.  @inc is
 * the change in tree height caused by the root update.
 */
STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*nptr,
	int				inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	/* The finobt root lives in separate AGI fields from the inobt. */
	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}
|
|
|
|
|
2020-08-17 09:58:01 -07:00
|
|
|
/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	/* Per-btree block counters only exist with the inobtcounts feature. */
	if (!xfs_has_inobtcounts(cur->bc_mp))
		return;

	/* finobt blocks are counted separately from inobt blocks. */
	if (xfs_btree_is_fino(cur->bc_ops))
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}
|
|
|
|
|
2008-10-30 16:57:03 +11:00
|
|
|
/*
 * Allocate one block for either inode btree, near the @start hint and
 * charged against the given AG space reservation @resv.  On success
 * *stat is 1 and *new holds the new AG block number; if no space was
 * available *stat is 0 and 0 is returned (not an error).
 */
STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat,
	enum xfs_ag_resv_type		resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.pag = to_perag(cur->bc_group);
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	/* Btree blocks are allocated strictly one at a time. */
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.resv = resv;

	error = xfs_alloc_vextent_near_bno(&args,
			xfs_agbno_to_fsb(args.pag, sbno));
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		/* Out of space: report "no block" rather than an error. */
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}
|
|
|
|
|
2017-01-25 07:49:35 -08:00
|
|
|
/* Allocate an inobt block without any per-AG metadata reservation. */
STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a finobt block, charged to the per-AG metadata reservation
 * unless the finobt is running without one (m_finobt_nores).
 */
STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}
|
|
|
|
|
2008-10-30 16:57:51 +11:00
|
|
|
/*
 * Free one btree block back to the given AG reservation and drop the AGI
 * block counter.  The extent free itself is deferred.
 */
STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	xfs_fsblock_t		fsbno;

	xfs_inobt_mod_blockcount(cur, -1);
	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
			&XFS_RMAP_OINFO_INOBT, resv, 0);
}
|
|
|
|
|
|
|
|
/* Free an inobt block; inobt blocks carry no special reservation. */
STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}
|
|
|
|
|
|
|
|
/*
 * Free a finobt block back to the metadata reservation, unless the
 * finobt is running without one (mirrors xfs_finobt_alloc_block).
 */
STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}
|
2008-10-30 16:57:03 +11:00
|
|
|
|
2008-10-30 16:55:23 +11:00
|
|
|
STATIC int
|
|
|
|
xfs_inobt_get_maxrecs(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
int level)
|
|
|
|
{
|
2019-06-05 11:19:34 -07:00
|
|
|
return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
|
2008-10-30 16:55:23 +11:00
|
|
|
}
|
|
|
|
|
2008-10-30 16:56:09 +11:00
|
|
|
/*
 * Build a key from a record.  ir_startino is the sole key field and both
 * sides are big-endian on disk, so this is a direct copy.
 */
STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}
|
|
|
|
|
2017-06-16 11:00:08 -07:00
|
|
|
STATIC void
|
|
|
|
xfs_inobt_init_high_key_from_rec(
|
2021-08-10 17:02:16 -07:00
|
|
|
union xfs_btree_key *key,
|
|
|
|
const union xfs_btree_rec *rec)
|
2017-06-16 11:00:08 -07:00
|
|
|
{
|
2021-08-10 17:02:16 -07:00
|
|
|
__u32 x;
|
2017-06-16 11:00:08 -07:00
|
|
|
|
|
|
|
x = be32_to_cpu(rec->inobt.ir_startino);
|
|
|
|
x += XFS_INODES_PER_CHUNK - 1;
|
|
|
|
key->inobt.ir_startino = cpu_to_be32(x);
|
|
|
|
}
|
|
|
|
|
2008-10-30 16:57:40 +11:00
|
|
|
/*
 * Encode the in-core record held in the cursor into the on-disk record
 * format, choosing the sparse or full layout by filesystem feature.
 */
STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_has_sparseinodes(cur->bc_mp)) {
		/* Sparse layout: holemask plus explicit inode counts. */
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}
|
|
|
|
|
2008-10-30 16:56:09 +11:00
|
|
|
/*
 * initial value of ptr for lookup: the inobt root pointer stored in the
 * AGI.  The cursor must be attached to the AGI buffer of its own AG.
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	/* Sanity: the AGI buffer must belong to this cursor's AG. */
	ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}
|
|
|
|
|
2014-04-24 16:00:52 +10:00
|
|
|
/*
 * Initial value of ptr for a finobt lookup: the free inode btree root
 * pointer stored in the AGI.
 */
STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	/* Sanity: the AGI buffer must belong to this cursor's AG. */
	ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_free_root;
}
|
|
|
|
|
2017-06-16 11:00:05 -07:00
|
|
|
STATIC int64_t
|
2008-10-30 16:56:09 +11:00
|
|
|
xfs_inobt_key_diff(
|
2021-08-10 17:02:15 -07:00
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
const union xfs_btree_key *key)
|
2008-10-30 16:56:09 +11:00
|
|
|
{
|
2017-06-16 11:00:05 -07:00
|
|
|
return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
|
2008-10-30 16:56:09 +11:00
|
|
|
cur->bc_rec.i.ir_startino;
|
|
|
|
}
|
|
|
|
|
2017-06-16 11:00:08 -07:00
|
|
|
/*
 * Signed difference between two keys.  @mask, when non-NULL, selects the
 * key fields to compare; ir_startino is the only key field, so assert
 * that any mask requests it.
 */
STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->inobt.ir_startino);

	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}
|
|
|
|
|
2018-01-08 10:51:03 -08:00
|
|
|
/*
 * Structural verifier shared by inobt and finobt blocks (same record
 * format).  Returns the failing code address, or NULL if the block is
 * acceptable.
 */
static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	/* Magic distinguishes inobt/finobt and CRC/non-CRC variants. */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check
	 * xfs_perag_initialised_agi(pag)) if we ever do.
	 */
	if (xfs_has_crc(mp)) {
		fa = xfs_btree_agblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_agblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}
|
|
|
|
|
|
|
|
/*
 * Read verifier: check the CRC first, then run the structural checks.
 * Failures are recorded on the buffer via xfs_verifier_error().
 */
static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_agblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	/* Trace any verification failure for debugging. */
	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}
|
2012-11-12 22:54:08 +11:00
|
|
|
|
2012-11-14 17:54:40 +11:00
|
|
|
/*
 * Write verifier: run the structural checks, then (re)compute the block
 * CRC so the on-disk checksum matches what is written.
 */
static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_agblock_calc_crc(bp);

}
|
|
|
|
|
2012-11-14 17:54:40 +11:00
|
|
|
/* Buffer ops for inode btree blocks; covers CRC and pre-CRC magics. */
const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};
|
|
|
|
|
2019-02-07 10:45:46 -08:00
|
|
|
/*
 * Buffer ops for free inode btree blocks.  Only the magics differ from
 * the inobt; the verifier callbacks are shared.
 */
const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};
|
|
|
|
|
2008-10-30 16:58:32 +11:00
|
|
|
STATIC int
|
|
|
|
xfs_inobt_keys_inorder(
|
2021-08-10 17:02:17 -07:00
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
const union xfs_btree_key *k1,
|
|
|
|
const union xfs_btree_key *k2)
|
2008-10-30 16:58:32 +11:00
|
|
|
{
|
|
|
|
return be32_to_cpu(k1->inobt.ir_startino) <
|
|
|
|
be32_to_cpu(k2->inobt.ir_startino);
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
|
|
|
xfs_inobt_recs_inorder(
|
2021-08-10 17:02:17 -07:00
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
const union xfs_btree_rec *r1,
|
|
|
|
const union xfs_btree_rec *r2)
|
2008-10-30 16:58:32 +11:00
|
|
|
{
|
|
|
|
return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
|
|
|
|
be32_to_cpu(r2->inobt.ir_startino);
|
|
|
|
}
|
|
|
|
|
2023-04-11 19:00:10 -07:00
|
|
|
/*
 * Classify whether @key1 and @key2 describe contiguous keyspace.  The
 * mask, when supplied, must select ir_startino (the only key field).
 */
STATIC enum xbtree_key_contig
xfs_inobt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->inobt.ir_startino);

	return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
				 be32_to_cpu(key2->inobt.ir_startino));
}
|
|
|
|
|
2024-02-22 12:35:15 -08:00
|
|
|
/* Btree operations vector for the (allocated) inode btree. */
const struct xfs_btree_ops xfs_inobt_ops = {
	.name			= "ino",
	.type			= XFS_BTREE_TYPE_AG,

	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),
	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs		= XFS_INO_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_ibt_2),
	.sick_mask		= XFS_SICK_AG_INOBT,

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
	.keys_contiguous	= xfs_inobt_keys_contiguous,
};
|
|
|
|
|
2024-02-22 12:35:15 -08:00
|
|
|
/*
 * Btree operations vector for the free inode btree.  Record/key handling
 * is shared with the inobt; cursor, root, alloc/free, root-pointer and
 * buffer ops are finobt-specific.
 */
const struct xfs_btree_ops xfs_finobt_ops = {
	.name			= "fino",
	.type			= XFS_BTREE_TYPE_AG,

	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),
	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs		= XFS_INO_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_fibt_2),
	.sick_mask		= XFS_SICK_AG_FINOBT,

	.dup_cursor		= xfs_finobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
	.keys_contiguous	= xfs_inobt_keys_contiguous,
};
|
|
|
|
|
2008-10-30 16:53:59 +11:00
|
|
|
/*
 * Create an inode btree cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
	/* The cursor owns a group reference for its lifetime. */
	cur->bc_group = xfs_group_hold(pag_group(pag));
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agi		*agi = agbp->b_addr;

		/* Seed the tree height from the on-disk AGI. */
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	}
	return cur;
}
|
|
|
|
|
|
|
|
/*
 * Create a free inode btree cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
struct xfs_btree_cur *
xfs_finobt_init_cursor(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
	/* The cursor owns a group reference for its lifetime. */
	cur->bc_group = xfs_group_hold(pag_group(pag));
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agi		*agi = agbp->b_addr;

		/* Seed the tree height from the on-disk AGI free level. */
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	}
	return cur;
}
|
2008-10-30 16:53:59 +11:00
|
|
|
|
2020-03-11 11:01:04 -07:00
|
|
|
/*
 * Install a new inobt btree root. Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (xfs_btree_is_ino(cur->bc_ops)) {
		/* inobt: install into the AGI root/level fields. */
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	} else {
		/* finobt: install into the AGI free root/level fields. */
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	}
}
|
|
|
|
|
2021-09-23 10:32:06 -07:00
|
|
|
/* Calculate number of records in an inode btree block. */
|
|
|
|
static inline unsigned int
|
|
|
|
xfs_inobt_block_maxrecs(
|
|
|
|
unsigned int blocklen,
|
|
|
|
bool leaf)
|
|
|
|
{
|
|
|
|
if (leaf)
|
|
|
|
return blocklen / sizeof(xfs_inobt_rec_t);
|
|
|
|
return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
|
|
|
|
}
|
|
|
|
|
2008-10-30 17:11:19 +11:00
|
|
|
/*
 * Calculate number of records in an inobt btree block.
 */
unsigned int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	unsigned int		blocklen,
	bool			leaf)
{
	/* Subtract the block header before dividing into record slots. */
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
	return xfs_inobt_block_maxrecs(blocklen, leaf);
}
|
2008-10-30 17:11:19 +11:00
|
|
|
|
2021-09-23 10:32:06 -07:00
|
|
|
/*
 * Maximum number of inode btree records per AG. Pretend that we can fill an
 * entire AG completely full of inodes except for the AG headers.
 */
#define XFS_MAX_INODE_RECORDS \
	((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \
			XFS_INODES_PER_CHUNK
|
|
|
|
|
|
|
|
/* Compute the max possible height for the inode btree. */
static inline unsigned int
xfs_inobt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	/* Use the smaller usable space of the CRC and non-CRC formats. */
	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	/* Worst-case (tallest) tree has every block only half full. */
	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}
|
|
|
|
|
|
|
|
/* Compute the max possible height for the free inode btree. */
static inline unsigned int
xfs_finobt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	/* The finobt only exists on CRC-enabled filesystems. */
	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	/* Worst-case (tallest) tree has every block only half full. */
	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}
|
|
|
|
|
|
|
|
/* Compute the max possible height for either inode btree. */
unsigned int
xfs_iallocbt_maxlevels_ondisk(void)
{
	return max(xfs_inobt_maxlevels_ondisk(),
		   xfs_finobt_maxlevels_ondisk());
}
|
2015-05-29 09:09:05 +10:00
|
|
|
|
|
|
|
/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	const struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
|
2015-05-29 09:18:32 +10:00
|
|
|
|
|
|
|
#if defined(DEBUG) || defined(XFS_WARN)
|
|
|
|
/*
|
|
|
|
* Verify that an in-core inode record has a valid inode count.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_inobt_rec_check_count(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
struct xfs_inobt_rec_incore *rec)
|
|
|
|
{
|
|
|
|
int inocount = 0;
|
|
|
|
int nextbit = 0;
|
|
|
|
uint64_t allocbmap;
|
|
|
|
int wordsz;
|
|
|
|
|
|
|
|
wordsz = sizeof(allocbmap) / sizeof(unsigned int);
|
|
|
|
allocbmap = xfs_inobt_irec_to_allocmask(rec);
|
|
|
|
|
|
|
|
nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
|
|
|
|
while (nextbit != -1) {
|
|
|
|
inocount++;
|
|
|
|
nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
|
|
|
|
nextbit + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (inocount != rec->ir_count)
|
|
|
|
return -EFSCORRUPTED;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif /* DEBUG */
|
2017-01-25 07:49:35 -08:00
|
|
|
|
|
|
|
/* Worst-case number of blocks an inobt covering this AG could occupy. */
static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag_mount(pag);
	xfs_agblock_t		agblocks = pag_group(pag)->xg_block_count;

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion. We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag_agno(pag)))
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}
|
|
|
|
|
|
|
|
static int
|
2024-02-22 12:40:47 -08:00
|
|
|
xfs_finobt_count_blocks(
|
2021-06-02 10:48:24 +10:00
|
|
|
struct xfs_perag *pag,
|
2023-02-13 09:14:52 +11:00
|
|
|
struct xfs_trans *tp,
|
2017-01-25 07:49:35 -08:00
|
|
|
xfs_extlen_t *tree_blocks)
|
|
|
|
{
|
2019-07-02 09:39:38 -07:00
|
|
|
struct xfs_buf *agbp = NULL;
|
2024-02-22 12:40:46 -08:00
|
|
|
struct xfs_btree_cur *cur;
|
2024-12-02 10:57:26 -08:00
|
|
|
xfs_filblks_t blocks;
|
2017-01-25 07:49:35 -08:00
|
|
|
int error;
|
|
|
|
|
2024-04-15 14:54:03 -07:00
|
|
|
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
2017-01-25 07:49:35 -08:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
xfs: xfs_finobt_count_blocks() walks the wrong btree
As a result of the factoring in commit 14dd46cf31f4 ("xfs: split
xfs_inobt_init_cursor"), mount started taking a long time on a
user's filesystem. For Anders, this made mount times regress from
under a second to over 15 minutes for a filesystem with only 30
million inodes in it.
Anders bisected it down to the above commit, but even then the bug
was not obvious. In this commit, over 20 calls to
xfs_inobt_init_cursor() were modified, and some we modified to call
a new function named xfs_finobt_init_cursor().
If that takes you a moment to reread those function names to see
what the rename was, then you have realised why this bug wasn't
spotted during review. And it wasn't spotted on inspection even
after the bisect pointed at this commit - a single missing "f" isn't
the easiest thing for a human eye to notice....
The result is that xfs_finobt_count_blocks() now incorrectly calls
xfs_inobt_init_cursor() so it is now walking the inobt instead of
the finobt. Hence when there are lots of allocated inodes in a
filesystem, mount takes a -long- time run because it now walks a
massive allocated inode btrees instead of the small, nearly empty
free inode btrees. It also means all the finobt space reservations
are wrong, so mount could potentially given ENOSPC on kernel
upgrade.
In hindsight, commit 14dd46cf31f4 should have been two commits - the
first to convert the finobt callers to the new API, the second to
modify the xfs_inobt_init_cursor() API for the inobt callers. That
would have made the bug very obvious during review.
Fixes: 14dd46cf31f4 ("xfs: split xfs_inobt_init_cursor")
Reported-by: Anders Blomdell <anders.blomdell@gmail.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2024-08-22 16:59:33 -07:00
|
|
|
cur = xfs_finobt_init_cursor(pag, tp, agbp);
|
2024-12-02 10:57:26 -08:00
|
|
|
error = xfs_btree_count_blocks(cur, &blocks);
|
2018-07-19 12:26:31 -07:00
|
|
|
xfs_btree_del_cursor(cur, error);
|
2018-07-29 22:37:08 -07:00
|
|
|
xfs_trans_brelse(tp, agbp);
|
2024-12-02 10:57:26 -08:00
|
|
|
*tree_blocks = blocks;
|
2017-01-25 07:49:35 -08:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2020-08-26 10:54:27 -07:00
|
|
|
/* Read finobt block count from AGI header. */
|
|
|
|
static int
|
|
|
|
xfs_finobt_read_blocks(
|
2021-06-02 10:48:24 +10:00
|
|
|
struct xfs_perag *pag,
|
2022-07-07 19:07:24 +10:00
|
|
|
struct xfs_trans *tp,
|
2020-08-26 10:54:27 -07:00
|
|
|
xfs_extlen_t *tree_blocks)
|
|
|
|
{
|
|
|
|
struct xfs_buf *agbp;
|
|
|
|
struct xfs_agi *agi;
|
|
|
|
int error;
|
|
|
|
|
2024-04-15 14:54:03 -07:00
|
|
|
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
2020-08-26 10:54:27 -07:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
agi = agbp->b_addr;
|
|
|
|
*tree_blocks = be32_to_cpu(agi->agi_fblocks);
|
|
|
|
xfs_trans_brelse(tp, agbp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-25 07:49:35 -08:00
|
|
|
/*
|
|
|
|
* Figure out how many blocks to reserve and how many are used by this btree.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_finobt_calc_reserves(
|
2021-06-02 10:48:24 +10:00
|
|
|
struct xfs_perag *pag,
|
2023-02-13 09:14:52 +11:00
|
|
|
struct xfs_trans *tp,
|
2017-01-25 07:49:35 -08:00
|
|
|
xfs_extlen_t *ask,
|
|
|
|
xfs_extlen_t *used)
|
|
|
|
{
|
|
|
|
xfs_extlen_t tree_len = 0;
|
|
|
|
int error;
|
|
|
|
|
2024-11-03 20:18:38 -08:00
|
|
|
if (!xfs_has_finobt(pag_mount(pag)))
|
2017-01-25 07:49:35 -08:00
|
|
|
return 0;
|
|
|
|
|
2024-11-03 20:18:38 -08:00
|
|
|
if (xfs_has_inobtcounts(pag_mount(pag)))
|
2022-07-07 19:07:24 +10:00
|
|
|
error = xfs_finobt_read_blocks(pag, tp, &tree_len);
|
2020-08-26 10:54:27 -07:00
|
|
|
else
|
2024-02-22 12:40:47 -08:00
|
|
|
error = xfs_finobt_count_blocks(pag, tp, &tree_len);
|
2017-01-25 07:49:35 -08:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2022-07-07 19:13:17 +10:00
|
|
|
*ask += xfs_inobt_max_size(pag);
|
2017-01-25 07:49:35 -08:00
|
|
|
*used += tree_len;
|
|
|
|
return 0;
|
|
|
|
}
|
2018-05-09 10:02:01 -07:00
|
|
|
|
|
|
|
/* Calculate the inobt btree size for some records. */
|
|
|
|
xfs_extlen_t
|
|
|
|
xfs_iallocbt_calc_size(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
unsigned long long len)
|
|
|
|
{
|
2019-06-05 11:19:34 -07:00
|
|
|
return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
|
2018-05-09 10:02:01 -07:00
|
|
|
}
|
2021-09-23 12:21:37 -07:00
|
|
|
|
|
|
|
int __init
|
|
|
|
xfs_inobt_init_cur_cache(void)
|
|
|
|
{
|
|
|
|
xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
|
|
|
|
xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
|
|
|
|
0, 0, NULL);
|
|
|
|
|
|
|
|
if (!xfs_inobt_cur_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_inobt_destroy_cur_cache(void)
|
|
|
|
{
|
|
|
|
kmem_cache_destroy(xfs_inobt_cur_cache);
|
|
|
|
xfs_inobt_cur_cache = NULL;
|
|
|
|
}
|