Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-12 08:00:09 +00:00)
Merge git://oss.sgi.com:8090/oss/git/xfs-2.6
* git://oss.sgi.com:8090/oss/git/xfs-2.6:
  [XFS] Cleanup in XFS after recent get_block_t interface tweaks.
  [XFS] Remove unused/obsoleted function: xfs_bmap_do_search_extents()
  [XFS] A change to inode chunk allocation to try allocating the new chunk
  Fixes a regression from the recent "remove ->get_blocks() support"
  [XFS] Fix compiler warning and small code inconsistencies in compat
  [XFS] We really suck at spulling. Thanks to Chris Pascoe for fixing all
This commit is contained in: commit e71ac6032e
@@ -524,8 +524,6 @@ static int get_more_blocks(struct dio *dio)
  */
 ret = dio->page_errors;
 if (ret == 0) {
- map_bh->b_state = 0;
- map_bh->b_size = 0;
 BUG_ON(dio->block_in_file >= dio->final_block_in_request);
 fs_startblk = dio->block_in_file >> dio->blkfactor;
 dio_count = dio->final_block_in_request - dio->block_in_file;
@@ -534,6 +532,9 @@ static int get_more_blocks(struct dio *dio)
 if (dio_count & blkmask)
 fs_count++;
 
+ map_bh->b_state = 0;
+ map_bh->b_size = fs_count << dio->inode->i_blkbits;
+
 create = dio->rw == WRITE;
 if (dio->lock_type == DIO_LOCKING) {
 if (dio->block_in_file < (i_size_read(dio->inode) >>
@@ -542,13 +543,13 @@ static int get_more_blocks(struct dio *dio)
 } else if (dio->lock_type == DIO_NO_LOCKING) {
 create = 0;
 }
 
 /*
 * For writes inside i_size we forbid block creations: only
 * overwrites are permitted. We fall back to buffered writes
 * at a higher level for inside-i_size block-instantiating
 * writes.
 */
- map_bh->b_size = fs_count << dio->blkbits;
 ret = (*dio->get_block)(dio->inode, fs_startblk,
 map_bh, create);
 }
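The two hunks above are the regression fix: map_bh->b_size must now be primed with the full request (fs_count filesystem blocks, in bytes of the inode's block size) before ->get_block() is called, because the size request travels through b_size rather than a separate block-count argument. A toy userspace sketch of that in/out contract (hypothetical toy types, not the kernel's buffer_head):

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures (illustration only). */
struct toy_bh {
	unsigned long	b_state;	/* mapping state flags */
	size_t		b_size;		/* in: bytes wanted, out: bytes mapped */
	unsigned long	b_blocknr;	/* out: mapped block number */
};

/* A get_block-style callback: b_size is an input (how much the caller
 * would like mapped) and an output (how much this call actually mapped). */
static int toy_get_block(unsigned long fs_startblk, struct toy_bh *bh, int blkbits)
{
	size_t max_extent = 8 << blkbits;	/* pretend extent: 8 fs blocks */

	assert(bh->b_size >= (1UL << blkbits));	/* caller asked for >= 1 block */
	bh->b_blocknr = fs_startblk + 100;	/* pretend on-disk location */
	if (bh->b_size > max_extent)		/* can't map past the extent */
		bh->b_size = max_extent;
	bh->b_state = 1;			/* "mapped" */
	return 0;
}

int main(void)
{
	struct toy_bh bh = { .b_state = 0, .b_size = 16 << 9 };	/* want 16 512B blocks */

	toy_get_block(42, &bh, 9);
	printf("mapped %zu bytes at block %lu\n", bh.b_size, bh.b_blocknr);
	return 0;
}

Leaving b_size zeroed (as the removed early initialization did) would tell such a callback that zero bytes were wanted, which is the regression the moved initialization repairs.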
@@ -79,7 +79,7 @@ static inline void mrdemote(mrlock_t *mrp)
 * Debug-only routine, without some platform-specific asm code, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
- * Note: means !ismrlocked would give false positivies, so don't do that.
+ * Note: means !ismrlocked would give false positives, so don't do that.
 */
 static inline int ismrlocked(mrlock_t *mrp, int type)
 {
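Why "!ismrlocked would give false positives": with only writer state tracked, a read-lock query can only answer conservatively. A toy sketch under that assumption (the real mrlock_t differs):

#include <stdio.h>

#define MR_ACCESS	1	/* query: held for read? */
#define MR_UPDATE	2	/* query: held for write? */

/* One plausible shape of a writer-only tracking lock (an assumption
 * made for illustration). */
struct toy_mrlock {
	int	mr_writer;	/* nonzero while held for write */
};

static int toy_ismrlocked(struct toy_mrlock *mrp, int type)
{
	if (type & MR_UPDATE)
		return mrp->mr_writer;
	/* Reader state is invisible, so the only answer that keeps
	 * ASSERT(ismrlocked(..., MR_ACCESS)) usable is "yes". */
	return 1;
}

int main(void)
{
	struct toy_mrlock m = { .mr_writer = 0 };	/* nobody holds it */

	/* Reports "held" even though the lock is free, which is exactly
	 * why ASSERT(!ismrlocked(...)) would fire spuriously. */
	printf("ismrlocked(MR_ACCESS) = %d\n", toy_ismrlocked(&m, MR_ACCESS));
	return 0;
}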
@@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
 */
 STATIC void
 xfs_submit_ioend(
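The two-pass idea in miniature (a toy sketch of the ordering argument, not xfs_submit_ioend() itself):

#include <stdio.h>

/* Toy buffers standing in for the buffer_heads of one ioend. */
struct toy_buf {
	int under_writeback;
};

static void start_writeback(struct toy_buf *b) { b->under_writeback = 1; }

static void submit(struct toy_buf *b)
{
	/* By the time any completion could run, every buffer on the page
	 * was already marked in pass one, so a fast completion of this
	 * buffer cannot see a not-yet-started sibling and end the page's
	 * writeback early. */
	printf("submit buffer (under_writeback=%d)\n", b->under_writeback);
}

int main(void)
{
	struct toy_buf bufs[3] = { {0}, {0}, {0} };
	int i;

	for (i = 0; i < 3; i++)	/* pass 1: mark everything first */
		start_writeback(&bufs[i]);
	for (i = 0; i < 3; i++)	/* pass 2: only now start the I/O */
		submit(&bufs[i]);
	return 0;
}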
@@ -699,7 +699,7 @@ xfs_convert_page(
 
 /*
 * page_dirty is initially a count of buffers on the page before
- * EOF and is decrememted as we move each into a cleanable state.
+ * EOF and is decremented as we move each into a cleanable state.
 *
 * Derivation:
 *
@@ -842,7 +842,7 @@ xfs_cluster_write(
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate. Some layers
+ * only valid if the page itself isn't completely uptodate. Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
@@ -892,7 +892,7 @@ xfs_page_state_convert(
 
 /*
 * page_dirty is initially a count of buffers on the page before
- * EOF and is decrememted as we move each into a cleanable state.
+ * EOF and is decremented as we move each into a cleanable state.
 *
 * Derivation:
 *
@@ -1223,10 +1223,9 @@ free_buffers:
 }
 
 STATIC int
- __xfs_get_block(
+ __xfs_get_blocks(
 struct inode *inode,
 sector_t iblock,
- unsigned long blocks,
 struct buffer_head *bh_result,
 int create,
 int direct,
@@ -1236,22 +1235,17 @@ __xfs_get_block(
 xfs_iomap_t iomap;
 xfs_off_t offset;
 ssize_t size;
- int retpbbm = 1;
+ int niomap = 1;
 int error;
 
 offset = (xfs_off_t)iblock << inode->i_blkbits;
- if (blocks)
- size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
- (xfs_off_t)blocks << inode->i_blkbits);
- else
- size = 1 << inode->i_blkbits;
-
+ ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ size = bh_result->b_size;
 VOP_BMAP(vp, offset, size,
- create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+ create ? flags : BMAPI_READ, &iomap, &niomap, error);
 if (error)
 return -error;
-
- if (retpbbm == 0)
+ if (niomap == 0)
 return 0;
 
 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
@@ -1271,12 +1265,16 @@ __xfs_get_block(
 }
 }
 
- /* If this is a realtime file, data might be on a new device */
+ /*
+ * If this is a realtime file, data may be on a different device.
+ * to that pointed to from the buffer_head b_bdev currently.
+ */
 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
- /* If we previously allocated a block out beyond eof and
- * we are now coming back to use it then we will need to
- * flag it as new even if it has a disk address.
+ /*
+ * If we previously allocated a block out beyond eof and we are
+ * now coming back to use it then we will need to flag it as new
+ * even if it has a disk address.
 */
 if (create &&
 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1292,26 +1290,24 @@ __xfs_get_block(
 }
 }
 
- if (blocks) {
+ if (direct || size > (1 << inode->i_blkbits)) {
 ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
 offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta,
- (xfs_off_t)blocks << inode->i_blkbits);
- bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+ iomap.iomap_bsize - iomap.iomap_delta, size);
+ bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
 }
 
 return 0;
 }
 
 int
- xfs_get_block(
+ xfs_get_blocks(
 struct inode *inode,
 sector_t iblock,
 struct buffer_head *bh_result,
 int create)
 {
- return __xfs_get_block(inode, iblock,
- bh_result->b_size >> inode->i_blkbits,
+ return __xfs_get_blocks(inode, iblock,
 bh_result, create, 0, BMAPI_WRITE);
 }
 
@@ -1322,8 +1318,7 @@ xfs_get_blocks_direct(
 struct buffer_head *bh_result,
 int create)
 {
- return __xfs_get_block(inode, iblock,
- bh_result->b_size >> inode->i_blkbits,
+ return __xfs_get_blocks(inode, iblock,
 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
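The old code clamped a u32 b_size against UINT_MAX; with b_size now carrying a byte count in a signed type, the clamp moves to LONG_MAX. A small userspace rendition of the clamping arithmetic (min_t re-created here under the assumption that it matches the kernel's compare-in-a-chosen-type macro):

#include <limits.h>
#include <stdio.h>

/* A userspace rendition of the kernel's min_t(): compare in a chosen
 * type to avoid signed/unsigned surprises. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	long long iomap_bsize = 1 << 20;	/* bytes covered by this mapping */
	long long iomap_delta = 4096;		/* offset of bh into the mapping */
	long long size = 512 * 1024;		/* bytes the caller asked for */
	long long offset, b_size;

	/* Map no more than what remains of the extent, then clamp to what
	 * a signed b_size can carry -- LONG_MAX now that b_size counts
	 * bytes rather than a u32 value. */
	offset = min_t(long long, iomap_bsize - iomap_delta, size);
	b_size = min_t(long long, LONG_MAX, offset);
	printf("b_size = %lld bytes\n", b_size);
	return 0;
}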
@@ -1339,9 +1334,9 @@ xfs_end_io_direct(
 /*
 * Non-NULL private data means we need to issue a transaction to
 * convert a range from unwritten to written extents. This needs
- * to happen from process contect but aio+dio I/O completion
+ * to happen from process context but aio+dio I/O completion
 * happens from irq context so we need to defer it to a workqueue.
- * This is not nessecary for synchronous direct I/O, but we do
+ * This is not necessary for synchronous direct I/O, but we do
 * it anyway to keep the code uniform and simpler.
 *
 * The core direct I/O code might be changed to always call the
@@ -1358,7 +1353,7 @@ xfs_end_io_direct(
 }
 
 /*
- * blockdev_direct_IO can return an error even afer the I/O
+ * blockdev_direct_IO can return an error even after the I/O
 * completion handler was called. Thus we need to protect
 * against double-freeing.
 */
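That double-free protection reduces to the usual free-and-NULL discipline on the shared private pointer. A toy sketch of the pattern (not the xfs completion code itself):

#include <stdlib.h>

struct dio_ctx {
	void *private;		/* ioend to convert, or NULL once consumed */
};

static void complete_io(struct dio_ctx *ctx)
{
	if (ctx->private) {
		free(ctx->private);
		ctx->private = NULL;	/* a later error path sees NULL ... */
	}
}

static void error_path(struct dio_ctx *ctx)
{
	if (ctx->private) {		/* ... and skips the second free */
		free(ctx->private);
		ctx->private = NULL;
	}
}

int main(void)
{
	struct dio_ctx ctx = { .private = malloc(64) };

	complete_io(&ctx);	/* completion handler already ran */
	error_path(&ctx);	/* error reported afterwards: no double-free */
	return 0;
}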
@@ -1405,7 +1400,7 @@ xfs_vm_prepare_write(
 unsigned int from,
 unsigned int to)
 {
- return block_prepare_write(page, from, to, xfs_get_block);
+ return block_prepare_write(page, from, to, xfs_get_blocks);
 }
 
 STATIC sector_t
@@ -1422,7 +1417,7 @@ xfs_vm_bmap(
 VOP_RWLOCK(vp, VRWLOCK_READ);
 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
 VOP_RWUNLOCK(vp, VRWLOCK_READ);
- return generic_block_bmap(mapping, block, xfs_get_block);
+ return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
 STATIC int
@@ -1430,7 +1425,7 @@ xfs_vm_readpage(
 struct file *unused,
 struct page *page)
 {
- return mpage_readpage(page, xfs_get_block);
+ return mpage_readpage(page, xfs_get_blocks);
 }
 
 STATIC int
@@ -1440,7 +1435,7 @@ xfs_vm_readpages(
 struct list_head *pages,
 struct unsigned nr_pages)
 {
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
+ return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
 STATIC void
@@ -41,6 +41,6 @@ typedef struct xfs_ioend {
 } xfs_ioend_t;
 
 extern struct address_space_operations xfs_address_space_operations;
- extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
 
 #endif /* __XFS_IOPS_H__ */
@@ -54,7 +54,7 @@
 * Note, the NFS filehandle also includes an fsid portion which
 * may have an inode number in it. That number is hardcoded to
 * 32bits and there is no way for XFS to intercept it. In
- * practice this means when exporting an XFS filesytem with 64bit
+ * practice this means when exporting an XFS filesystem with 64bit
 * inodes you should either export the mountpoint (rather than
 * a subdirectory) or use the "fsid" export option.
 */
@@ -72,7 +72,7 @@ xfs_ioctl32_flock(
 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
 return -EFAULT;
-
 
 return (unsigned long)p;
 }
 
@@ -107,11 +107,15 @@ xfs_ioctl32_bulkstat(
 #endif
 
 STATIC long
- xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
+ xfs_compat_ioctl(
+ int mode,
+ struct file *file,
+ unsigned cmd,
+ unsigned long arg)
 {
- struct inode *inode = f->f_dentry->d_inode;
- vnode_t *vp = vn_to_inode(inode);
+ struct inode *inode = file->f_dentry->d_inode;
+ vnode_t *vp = vn_from_inode(inode);
 int error;
 
 switch (cmd) {
 case XFS_IOC_DIOINFO:
@@ -189,7 +193,7 @@ xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
 return -ENOIOCTLCMD;
 }
 
- VOP_IOCTL(vp, inode, f, mode, cmd, (void __user *)arg, error);
+ VOP_IOCTL(vp, inode, file, mode, cmd, (void __user *)arg, error);
 VMODIFY(vp);
 
 return error;
@@ -197,18 +201,18 @@ xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
 
 long
 xfs_file_compat_ioctl(
- struct file *f,
+ struct file *file,
 unsigned cmd,
 unsigned long arg)
 {
- return xfs_compat_ioctl(0, f, cmd, arg);
+ return xfs_compat_ioctl(0, file, cmd, arg);
 }
 
 long
 xfs_file_compat_invis_ioctl(
- struct file *f,
+ struct file *file,
 unsigned cmd,
 unsigned long arg)
 {
- return xfs_compat_ioctl(IO_INVIS, f, cmd, arg);
+ return xfs_compat_ioctl(IO_INVIS, file, cmd, arg);
 }
@@ -708,7 +708,7 @@ STATIC void
 xfs_vn_truncate(
 struct inode *inode)
 {
- block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block);
+ block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_blocks);
 }
 
 STATIC int
@@ -681,7 +681,7 @@ start:
 eventsent = 1;
 
 /*
- * The iolock was dropped and reaquired in XFS_SEND_DATA
+ * The iolock was dropped and reacquired in XFS_SEND_DATA
 * so we have to recheck the size when appending.
 * We will only "goto start;" once, since having sent the
 * event prevents another call to XFS_SEND_DATA, which is
@@ -92,7 +92,7 @@ typedef enum {
 #define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
 #define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
 #define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
- #define SYNC_QUIESCE 0x0100 /* quiesce fileystem for a snapshot */
+ #define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
 
 typedef int (*vfs_mount_t)(bhv_desc_t *,
 struct xfs_mount_args *, struct cred *);
@@ -221,7 +221,7 @@ xfs_qm_dqunpin_wait(
 * as possible.
 *
 * We must not be holding the AIL_LOCK at this point. Calling incore() to
- * search the buffercache can be a time consuming thing, and AIL_LOCK is a
+ * search the buffer cache can be a time consuming thing, and AIL_LOCK is a
 * spinlock.
 */
 STATIC void
@@ -289,7 +289,7 @@ xfs_qm_rele_quotafs_ref(
 
 /*
 * This is called at mount time from xfs_mountfs to initialize the quotainfo
- * structure and start the global quotamanager (xfs_Gqm) if it hasn't done
+ * structure and start the global quota manager (xfs_Gqm) if it hasn't done
 * so already. Note that the superblock has not been read in yet.
 */
 void
@@ -807,7 +807,7 @@ xfs_qm_dqattach_one(
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups. The idea sounds simple, but the
 * execution isn't, because the udquot might have a group dquot attached
- * already and getting rid of that gets us into lock ordering contraints.
+ * already and getting rid of that gets us into lock ordering constraints.
 * The process is complicated more by the fact that the dquots may or may not
 * be locked on entry.
 */
@@ -1094,10 +1094,10 @@ xfs_qm_sync(
 }
 /*
 * If we can't grab the flush lock then if the caller
- * really wanted us to give this our best shot,
+ * really wanted us to give this our best shot, so
 * see if we can give a push to the buffer before we wait
 * on the flush lock. At this point, we know that
- * eventhough the dquot is being flushed,
+ * even though the dquot is being flushed,
 * it has (new) dirty data.
 */
 xfs_qm_dqflock_pushbuf_wait(dqp);
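The trylock-then-push idea in miniature, with a pthread mutex standing in for the dquot flush lock (an illustrative sketch, not the xfs_qm code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

static void push_buffer(void)
{
	/* stand-in for nudging the dquot's buffer toward the disk so the
	 * flush already in progress can finish sooner */
	printf("pushing buffer before waiting\n");
}

static void flush_dquot(void)
{
	if (pthread_mutex_trylock(&flush_lock) != 0) {
		push_buffer();			 /* give it our best shot */
		pthread_mutex_lock(&flush_lock); /* then wait for the flush */
	}
	printf("flushing dirty dquot\n");
	pthread_mutex_unlock(&flush_lock);
}

int main(void)
{
	flush_dquot();
	return 0;
}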
@@ -1491,7 +1491,7 @@ xfs_qm_reset_dqcounts(
 /*
 * Do a sanity check, and if needed, repair the dqblk. Don't
 * output any warnings because it's perfectly possible to
- * find unitialized dquot blks. See comment in xfs_qm_dqcheck.
+ * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
 */
 (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
 "xfs_quotacheck");
@@ -1580,7 +1580,7 @@ xfs_qm_dqiterate(
 
 error = 0;
 /*
- * This looks racey, but we can't keep an inode lock across a
+ * This looks racy, but we can't keep an inode lock across a
 * trans_reserve. But, this gets called during quotacheck, and that
 * happens only at mount time which is single threaded.
 */
@@ -1824,7 +1824,7 @@ xfs_qm_dqusage_adjust(
 * we have to start from the beginning anyway.
 * Once we're done, we'll log all the dquot bufs.
 *
- * The *QUOTA_ON checks below may look pretty racey, but quotachecks
+ * The *QUOTA_ON checks below may look pretty racy, but quotachecks
 * and quotaoffs don't race. (Quotachecks happen at mount time only).
 */
 if (XFS_IS_UQUOTA_ON(mp)) {
@@ -912,7 +912,7 @@ xfs_qm_export_dquot(
 
 /*
 * Internally, we don't reset all the timers when quota enforcement
- * gets turned off. No need to confuse the userlevel code,
+ * gets turned off. No need to confuse the user level code,
 * so return zeroes in that case.
 */
 if (! XFS_IS_QUOTA_ENFORCED(mp)) {
@@ -804,7 +804,7 @@ xfs_trans_reserve_quota_bydquots(
 }
 
 /*
- * Didnt change anything critical, so, no need to log
+ * Didn't change anything critical, so, no need to log
 */
 return (0);
 }
@@ -395,7 +395,7 @@ xfs_acl_allow_set(
 * The access control process to determine the access permission:
 * if uid == file owner id, use the file owner bits.
 * if gid == file owner group id, use the file group bits.
- * scan ACL for a maching user or group, and use matched entry
+ * scan ACL for a matching user or group, and use matched entry
 * permission. Use total permissions of all matching group entries,
 * until all acl entries are exhausted. The final permission produced
 * by matching acl entry or entries needs to be & with group permission.
@@ -179,7 +179,7 @@ typedef struct xfs_perag
 {
 char pagf_init; /* this agf's entry is initialized */
 char pagi_init; /* this agi's entry is initialized */
- char pagf_metadata; /* the agf is prefered to be metadata */
+ char pagf_metadata; /* the agf is preferred to be metadata */
 char pagi_inodeok; /* The agi is ok for inodes */
 __uint8_t pagf_levels[XFS_BTNUM_AGF];
 /* # of levels in bno & cnt btree */
@@ -511,7 +511,7 @@ STATIC void
 xfs_alloc_trace_busy(
 char *name, /* function tag string */
 char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount poing */
+ xfs_mount_t *mp, /* file system mount point */
 xfs_agnumber_t agno, /* allocation group number */
 xfs_agblock_t agbno, /* a.g. relative block number */
 xfs_extlen_t len, /* length of extent */
@@ -1843,7 +1843,7 @@ xfs_alloc_fix_freelist(
 } else
 agbp = NULL;
 
- /* If this is a metadata prefered pag and we are user data
+ /* If this is a metadata preferred pag and we are user data
 * then try somewhere else if we are not being asked to
 * try harder at this point
 */
@@ -2458,7 +2458,7 @@ error0:
 /*
 * AG Busy list management
 * The busy list contains block ranges that have been freed but whose
- * transacations have not yet hit disk. If any block listed in a busy
+ * transactions have not yet hit disk. If any block listed in a busy
 * list is reused, the transaction that freed it must be forced to disk
 * before continuing to use the block.
 *
@@ -68,7 +68,7 @@ typedef struct xfs_alloc_arg {
 xfs_alloctype_t otype; /* original allocation type */
 char wasdel; /* set if allocation was prev delayed */
 char wasfromfl; /* set if allocation is from freelist */
- char isfl; /* set if is freelist blocks - !actg */
+ char isfl; /* set if is freelist blocks - !acctg */
 char userdata; /* set if this is user data */
 } xfs_alloc_arg_t;
 
@@ -294,7 +294,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
 xfs_trans_ihold(args.trans, dp);
 
 /*
- * If the attribute list is non-existant or a shortform list,
+ * If the attribute list is non-existent or a shortform list,
 * upgrade it to a single-leaf-block attribute list.
 */
 if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
@@ -1584,7 +1584,7 @@ out:
 * Fill in the disk block numbers in the state structure for the buffers
 * that are attached to the state structure.
 * This is done so that we can quickly reattach ourselves to those buffers
- * after some set of transaction commit's has released these buffers.
+ * after some set of transaction commits have released these buffers.
 */
 STATIC int
 xfs_attr_fillstate(xfs_da_state_t *state)
@@ -1631,7 +1631,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
 /*
 * Reattach the buffers to the state structure based on the disk block
 * numbers stored in the state structure.
- * This is done after some set of transaction commit's has released those
+ * This is done after some set of transaction commits have released those
 * buffers from our grip.
 */
 STATIC int
@@ -524,7 +524,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
 
 /*
 * Copy out entries of shortform attribute lists for attr_list().
- * Shortform atrtribute lists are not stored in hashval sorted order.
+ * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then we
 * we have to calculate each entries' hashvalue and sort them before
 * we can begin returning them to the user.
@@ -1541,7 +1541,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
 /*
 * Check for the degenerate case of the block being empty.
 * If the block is empty, we'll simply delete it, no need to
- * coalesce it with a sibling block. We choose (aribtrarily)
+ * coalesce it with a sibling block. We choose (arbitrarily)
 * to merge with the forward block unless it is NULL.
 */
 if (count == 0) {
@@ -31,7 +31,7 @@
 * The behavior chain is ordered based on the 'position' number which
 * lives in the first field of the ops vector (higher numbers first).
 *
- * Attemps to insert duplicate ops result in an EINVAL return code.
+ * Attempts to insert duplicate ops result in an EINVAL return code.
 * Otherwise, return 0 to indicate success.
 */
 int
@@ -84,7 +84,7 @@ bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp)
 
 /*
 * Remove a behavior descriptor from a position in a behavior chain;
- * the postition is guaranteed not to be the first position.
+ * the position is guaranteed not to be the first position.
 * Should only be called by the bhv_remove() macro.
 */
 void
@@ -39,7 +39,7 @@
 * behaviors is synchronized with operations-in-progress (oip's) so that
 * the oip's always see a consistent view of the chain.
 *
- * The term "interpostion" is used to refer to the act of inserting
+ * The term "interposition" is used to refer to the act of inserting
 * a behavior such that it interposes on (i.e., is inserted in front
 * of) a particular other behavior. A key example of this is when a
 * system implementing distributed single system image wishes to
@@ -51,7 +51,7 @@
 *
 * Behavior synchronization is logic which is necessary under certain
 * circumstances that there is no conflict between ongoing operations
- * traversing the behavior chain and those dunamically modifying the
+ * traversing the behavior chain and those dynamically modifying the
 * behavior chain. Because behavior synchronization adds extra overhead
 * to virtual operation invocation, we want to restrict, as much as
 * we can, the requirement for this extra code, to those situations
@@ -3467,113 +3467,6 @@ done:
 return error;
 }
 
- xfs_bmbt_rec_t * /* pointer to found extent entry */
- xfs_bmap_do_search_extents(
- xfs_bmbt_rec_t *base, /* base of extent list */
- xfs_extnum_t lastx, /* last extent index used */
- xfs_extnum_t nextents, /* number of file extents */
- xfs_fileoff_t bno, /* block number searched for */
- int *eofp, /* out: end of file found */
- xfs_extnum_t *lastxp, /* out: last extent index */
- xfs_bmbt_irec_t *gotp, /* out: extent entry found */
- xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
- {
- xfs_bmbt_rec_t *ep; /* extent list entry pointer */
- xfs_bmbt_irec_t got; /* extent list entry, decoded */
- int high; /* high index of binary search */
- int low; /* low index of binary search */
-
- /*
- * Initialize the extent entry structure to catch access to
- * uninitialized br_startblock field.
- */
- got.br_startoff = 0xffa5a5a5a5a5a5a5LL;
- got.br_blockcount = 0xa55a5a5a5a5a5a5aLL;
- got.br_state = XFS_EXT_INVALID;
-
- #if XFS_BIG_BLKNOS
- got.br_startblock = 0xffffa5a5a5a5a5a5LL;
- #else
- got.br_startblock = 0xffffa5a5;
- #endif
-
- if (lastx != NULLEXTNUM && lastx < nextents)
- ep = base + lastx;
- else
- ep = NULL;
- prevp->br_startoff = NULLFILEOFF;
- if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) &&
- bno < got.br_startoff +
- (got.br_blockcount = xfs_bmbt_get_blockcount(ep)))
- *eofp = 0;
- else if (ep && lastx < nextents - 1 &&
- bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) &&
- bno < got.br_startoff +
- (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) {
- lastx++;
- ep++;
- *eofp = 0;
- } else if (nextents == 0)
- *eofp = 1;
- else if (bno == 0 &&
- (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) {
- ep = base;
- lastx = 0;
- got.br_blockcount = xfs_bmbt_get_blockcount(ep);
- *eofp = 0;
- } else {
- low = 0;
- high = nextents - 1;
- /* binary search the extents array */
- while (low <= high) {
- XFS_STATS_INC(xs_cmp_exlist);
- lastx = (low + high) >> 1;
- ep = base + lastx;
- got.br_startoff = xfs_bmbt_get_startoff(ep);
- got.br_blockcount = xfs_bmbt_get_blockcount(ep);
- if (bno < got.br_startoff)
- high = lastx - 1;
- else if (bno >= got.br_startoff + got.br_blockcount)
- low = lastx + 1;
- else {
- got.br_startblock = xfs_bmbt_get_startblock(ep);
- got.br_state = xfs_bmbt_get_state(ep);
- *eofp = 0;
- *lastxp = lastx;
- *gotp = got;
- return ep;
- }
- }
- if (bno >= got.br_startoff + got.br_blockcount) {
- lastx++;
- if (lastx == nextents) {
- *eofp = 1;
- got.br_startblock = xfs_bmbt_get_startblock(ep);
- got.br_state = xfs_bmbt_get_state(ep);
- *prevp = got;
- ep = NULL;
- } else {
- *eofp = 0;
- xfs_bmbt_get_all(ep, prevp);
- ep++;
- got.br_startoff = xfs_bmbt_get_startoff(ep);
- got.br_blockcount = xfs_bmbt_get_blockcount(ep);
- }
- } else {
- *eofp = 0;
- if (ep > base)
- xfs_bmbt_get_all(ep - 1, prevp);
- }
- }
- if (ep) {
- got.br_startblock = xfs_bmbt_get_startblock(ep);
- got.br_state = xfs_bmbt_get_state(ep);
- }
- *lastxp = lastx;
- *gotp = got;
- return ep;
- }
-
 /*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry. If bno lies
@@ -362,14 +362,6 @@ xfs_bmbt_rec_t *
 xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
 xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
 
- /*
- * Search an extent list for the extent which includes block
- * bno.
- */
- xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
- xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
- xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
-
 #endif /* __KERNEL__ */
 
 #endif /* __XFS_BMAP_H__ */
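For reference, the heart of the removed helper is an ordinary binary search over sorted, non-overlapping [startoff, startoff+blockcount) ranges; a standalone sketch:

#include <stdio.h>

struct ext {
	unsigned long long startoff;
	unsigned long long blockcount;
};

/* Return the index of the extent containing bno, or -1 if bno falls
 * in a hole between extents (or past the last one). */
static int search_extents(const struct ext *base, int nextents,
			  unsigned long long bno)
{
	int low = 0, high = nextents - 1;

	while (low <= high) {
		int mid = (low + high) >> 1;

		if (bno < base[mid].startoff)
			high = mid - 1;
		else if (bno >= base[mid].startoff + base[mid].blockcount)
			low = mid + 1;
		else
			return mid;	/* bno falls inside this extent */
	}
	return -1;			/* hole */
}

int main(void)
{
	struct ext list[] = { {0, 8}, {16, 4}, {100, 50} };

	printf("bno 18 -> extent %d\n", search_extents(list, 3, 18));
	printf("bno 10 -> extent %d\n", search_extents(list, 3, 10));
	return 0;
}

The surviving xfs_bmap_search_multi_extents() covers the same ground, which is why the duplicate could go.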
@@ -98,12 +98,12 @@ xfs_buf_item_flush_log_debug(
 }
 
 /*
- * This function is called to verify that our caller's have logged
+ * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
- * and ensuring that all bytes which miscompare are set in the bli_logged
+ * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
 STATIC void
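The check described above, reduced to its essence (a sketch under the assumption of a byte-granularity "logged" bitmap; the real bli_logged layout may differ):

#include <stdio.h>

/* Every byte that differs from the original copy must have its bit set
 * in the logged map, otherwise a change escaped the log. */
static int verify_logged(const unsigned char *orig, const unsigned char *cur,
			 const unsigned char *logged, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (orig[i] != cur[i] && !(logged[i / 8] & (1 << (i % 8))))
			return 0;	/* changed but never logged: bug */
	return 1;
}

int main(void)
{
	unsigned char orig[16] = {0}, cur[16] = {0}, logged[2] = {0};

	cur[5] = 0xff;			/* caller modified byte 5 ... */
	logged[0] |= 1 << 5;		/* ... and remembered to log it */
	printf("all changes logged: %d\n", verify_logged(orig, cur, logged, 16));
	return 0;
}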
@@ -38,7 +38,7 @@ typedef struct xfs_cap_set {
 /*
 * For Linux, we take the bitfields directly from capability.h
 * and no longer attempt to keep this attribute ondisk compatible
- * with IRIX. Since this attribute is only set on exectuables,
+ * with IRIX. Since this attribute is only set on executables,
 * it just doesn't make much sense to try. We do use a different
 * named attribute though, to avoid confusion.
 */
@@ -840,7 +840,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
 /*
 * Check for the degenerate case of the block being empty.
 * If the block is empty, we'll simply delete it, no need to
- * coalesce it with a sibling block. We choose (aribtrarily)
+ * coalesce it with a sibling block. We choose (arbitrarily)
 * to merge with the forward block unless it is NULL.
 */
 if (count == 0) {
@@ -533,7 +533,7 @@ xfs_dir2_block_getdents(
 
 /*
 * Reached the end of the block.
- * Set the offset to a nonexistent block 1 and return.
+ * Set the offset to a non-existent block 1 and return.
 */
 *eofp = 1;
 
@@ -515,7 +515,7 @@ xfs_dir2_leaf_addname(
 ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
 XFS_DIR2_NULL_DATAPTR);
 /*
- * Copy entries down to copver the stale entry
+ * Copy entries down to cover the stale entry
 * and make room for the new entry.
 */
 if (highstale - index > 0)
@@ -830,7 +830,7 @@ xfs_dir2_leafn_rebalance(
 state->inleaf = 1;
 blk2->index = 0;
 cmn_err(CE_ALERT,
- "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting orignal leaf: "
+ "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: "
 "blk1->index %d\n",
 blk1->index);
 }
@@ -1341,7 +1341,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
 /*
 * Check for the degenerate case of the block being empty.
 * If the block is empty, we'll simply delete it, no need to
- * coalesce it with a sibling block. We choose (aribtrarily)
+ * coalesce it with a sibling block. We choose (arbitrarily)
 * to merge with the forward block unless it is NULL.
 */
 if (count == 0) {
@@ -477,7 +477,7 @@ xfs_fs_counts(
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
- * is kept in m_resbls_avail.
+ * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
@@ -136,7 +136,7 @@ xfs_ialloc_ag_alloc(
 int ninodes; /* num inodes per buf */
 xfs_agino_t thisino; /* current inode number, for loop */
 int version; /* inode version number to use */
- int isaligned; /* inode allocation at stripe unit */
+ int isaligned = 0; /* inode allocation at stripe unit */
 /* boundary */
 
 args.tp = tp;
@@ -152,47 +152,75 @@ xfs_ialloc_ag_alloc(
 return XFS_ERROR(ENOSPC);
 args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
 /*
- * Set the alignment for the allocation.
- * If stripe alignment is turned on then align at stripe unit
- * boundary.
- * If the cluster size is smaller than a filesystem block
- * then we're doing I/O for inodes in filesystem block size pieces,
- * so don't need alignment anyway.
- */
- isaligned = 0;
- if (args.mp->m_sinoalign) {
- ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
- args.alignment = args.mp->m_dalign;
- isaligned = 1;
- } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
- args.mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
- args.alignment = args.mp->m_sb.sb_inoalignmt;
- else
- args.alignment = 1;
+ * First try to allocate inodes contiguous with the last-allocated
+ * chunk of inodes. If the filesystem is striped, this will fill
+ * an entire stripe unit with inodes.
+ */
 agi = XFS_BUF_TO_AGI(agbp);
- /*
- * Need to figure out where to allocate the inode blocks.
- * Ideally they should be spaced out through the a.g.
- * For now, just allocate blocks up front.
- */
- args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno),
- args.agbno);
- /*
- * Allocate a fixed-size extent of inodes.
- */
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.mod = args.total = args.wasdel = args.isfl = args.userdata =
- args.minalignslop = 0;
- args.prod = 1;
- /*
- * Allow space for the inode btree to split.
- */
- args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
- if ((error = xfs_alloc_vextent(&args)))
- return error;
+ newino = be32_to_cpu(agi->agi_newino);
+ if(likely(newino != NULLAGINO)) {
+ args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
+ XFS_IALLOC_BLOCKS(args.mp);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp,
+ be32_to_cpu(agi->agi_seqno), args.agbno);
+ args.type = XFS_ALLOCTYPE_THIS_BNO;
+ args.mod = args.total = args.wasdel = args.isfl =
+ args.userdata = args.minalignslop = 0;
+ args.prod = 1;
+ args.alignment = 1;
+ /*
+ * Allow space for the inode btree to split.
+ */
+ args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
+ if ((error = xfs_alloc_vextent(&args)))
+ return error;
+ } else
+ args.fsbno = NULLFSBLOCK;
+
+ if (unlikely(args.fsbno == NULLFSBLOCK)) {
+ /*
+ * Set the alignment for the allocation.
+ * If stripe alignment is turned on then align at stripe unit
+ * boundary.
+ * If the cluster size is smaller than a filesystem block
+ * then we're doing I/O for inodes in filesystem block size
+ * pieces, so don't need alignment anyway.
+ */
+ isaligned = 0;
+ if (args.mp->m_sinoalign) {
+ ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
+ args.alignment = args.mp->m_dalign;
+ isaligned = 1;
+ } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
+ args.mp->m_sb.sb_inoalignmt >=
+ XFS_B_TO_FSBT(args.mp,
+ XFS_INODE_CLUSTER_SIZE(args.mp)))
+ args.alignment = args.mp->m_sb.sb_inoalignmt;
+ else
+ args.alignment = 1;
+ /*
+ * Need to figure out where to allocate the inode blocks.
+ * Ideally they should be spaced out through the a.g.
+ * For now, just allocate blocks up front.
+ */
+ args.agbno = be32_to_cpu(agi->agi_root);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp,
+ be32_to_cpu(agi->agi_seqno), args.agbno);
+ /*
+ * Allocate a fixed-size extent of inodes.
+ */
+ args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ args.mod = args.total = args.wasdel = args.isfl =
+ args.userdata = args.minalignslop = 0;
+ args.prod = 1;
+ /*
+ * Allow space for the inode btree to split.
+ */
+ args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
+ if ((error = xfs_alloc_vextent(&args)))
+ return error;
+ }
 
 /*
 * If stripe alignment is turned on, then try again with cluster
 * alignment.
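The new chunk-allocation strategy in miniature: ask for the exact block following the last-allocated chunk (XFS_ALLOCTYPE_THIS_BNO), and only fall back to a nearby search (XFS_ALLOCTYPE_NEAR_BNO) if that precise spot is unavailable. A toy allocator sketch, not the xfs_alloc_vextent() machinery:

#include <stdio.h>

#define NULLBLOCK (~0ULL)

static unsigned long long alloc_exact(unsigned long long want,
				      const int *used, int nblocks)
{
	return (want < (unsigned long long)nblocks && !used[want])
		? want : NULLBLOCK;
}

static unsigned long long alloc_near(unsigned long long want,
				     const int *used, int nblocks)
{
	int i;

	for (i = 0; i < nblocks; i++) {	/* crude wrapping scan from want */
		int b = (int)((want + i) % (unsigned long long)nblocks);

		if (!used[b])
			return (unsigned long long)b;
	}
	return NULLBLOCK;
}

int main(void)
{
	int used[8] = { 1, 1, 1, 1, 0, 1, 0, 0 };
	unsigned long long want = 3;	/* block right after the last chunk */
	unsigned long long got;

	got = alloc_exact(want, used, 8);		/* THIS_BNO attempt */
	if (got == NULLBLOCK)
		got = alloc_near(want, used, 8);	/* NEAR_BNO fallback */
	printf("new inode chunk at block %llu\n", got);
	return 0;
}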
@@ -1023,7 +1051,7 @@ xfs_difree(
 rec.ir_freecount++;
 
 /*
- * When an inode cluster is free, it becomes elgible for removal
+ * When an inode cluster is free, it becomes eligible for removal
 */
 if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
 (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
@@ -509,7 +509,7 @@ retry:
 } else {
 /*
 * If the inode is not fully constructed due to
- * filehandle mistmatches wait for the inode to go
+ * filehandle mismatches wait for the inode to go
 * away and try again.
 *
 * iget_locked will call __wait_on_freeing_inode
@@ -160,7 +160,7 @@ xfs_inotobp(
 xfs_dinode_t *dip;
 
 /*
- * Call the space managment code to find the location of the
+ * Call the space management code to find the location of the
 * inode on disk.
 */
 imap.im_blkno = 0;
@@ -837,7 +837,7 @@ xfs_dic2xflags(
 
 /*
 * Given a mount structure and an inode number, return a pointer
- * to a newly allocated in-core inode coresponding to the given
+ * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
@@ -2723,7 +2723,7 @@ xfs_ipin(
 /*
 * Decrement the pin count of the given inode, and wake up
 * anyone in xfs_iwait_unpin() if the count goes to 0. The
- * inode must have been previoulsy pinned with a call to xfs_ipin().
+ * inode must have been previously pinned with a call to xfs_ipin().
 */
 void
 xfs_iunpin(
@@ -3690,7 +3690,7 @@ void
 xfs_iext_add(
 xfs_ifork_t *ifp, /* inode fork pointer */
 xfs_extnum_t idx, /* index to begin adding exts */
- int ext_diff) /* nubmer of extents to add */
+ int ext_diff) /* number of extents to add */
 {
 int byte_diff; /* new bytes being added */
 int new_size; /* size of extents after adding */
@@ -4038,7 +4038,7 @@ xfs_iext_remove_indirect(
 xfs_extnum_t ext_diff; /* extents to remove in current list */
 xfs_extnum_t nex1; /* number of extents before idx */
 xfs_extnum_t nex2; /* extents after idx + count */
- int nlists; /* entries in indirecton array */
+ int nlists; /* entries in indirection array */
 int page_idx = idx; /* index in target extent list */
 
 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
@@ -4291,9 +4291,9 @@ xfs_iext_bno_to_ext(
 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
 xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */
 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
- int high; /* upper boundry in search */
+ int high; /* upper boundary in search */
 xfs_extnum_t idx = 0; /* index of target extent */
- int low; /* lower boundry in search */
+ int low; /* lower boundary in search */
 xfs_extnum_t nextents; /* number of file extents */
 xfs_fileoff_t startoff = 0; /* start offset of extent */
 
@@ -580,7 +580,7 @@ xfs_inode_item_unpin_remove(
 * been or is in the process of being flushed, then (ideally) we'd like to
 * see if the inode's buffer is still incore, and if so give it a nudge.
 * We delay doing so until the pushbuf routine, though, to avoid holding
- * the AIL lock across a call to the blackhole which is the buffercache.
+ * the AIL lock across a call to the blackhole which is the buffer cache.
 * Also we don't want to sleep in any device strategy routines, which can happen
 * if we do the subsequent bawrite in here.
 */
@@ -272,7 +272,7 @@ xfs_bulkstat(
 size_t statstruct_size, /* sizeof struct filling */
 char __user *ubuffer, /* buffer with inode stats */
 int flags, /* defined in xfs_itable.h */
- int *done) /* 1 if there're more stats to get */
+ int *done) /* 1 if there are more stats to get */
 {
 xfs_agblock_t agbno=0;/* allocation group block number */
 xfs_buf_t *agbp; /* agi header buffer */
@@ -676,7 +676,7 @@ xfs_bulkstat_single(
 xfs_mount_t *mp, /* mount point for filesystem */
 xfs_ino_t *lastinop, /* inode to return */
 char __user *buffer, /* buffer with inode stats */
- int *done) /* 1 if there're more stats to get */
+ int *done) /* 1 if there are more stats to get */
 {
 int count; /* count value for bulkstat call */
 int error; /* return value */
@@ -60,7 +60,7 @@ xfs_bulkstat(
 size_t statstruct_size,/* sizeof struct that we're filling */
 char __user *ubuffer,/* buffer with inode stats */
 int flags, /* flag to control access method */
- int *done); /* 1 if there're more stats to get */
+ int *done); /* 1 if there are more stats to get */
 
 int
 xfs_bulkstat_single(
@@ -59,7 +59,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
 int num_bblks);
 STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
 STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
- STATIC void xlog_unalloc_log(xlog_t *log);
+ STATIC void xlog_dealloc_log(xlog_t *log);
 STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
 int nentries, xfs_log_ticket_t tic,
 xfs_lsn_t *start_lsn,
@@ -304,7 +304,7 @@ xfs_log_done(xfs_mount_t *mp,
 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
 (flags & XFS_LOG_REL_PERM_RESERV)) {
 /*
- * Release ticket if not permanent reservation or a specifc
+ * Release ticket if not permanent reservation or a specific
 * request has been made to release a permanent reservation.
 */
 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
@@ -511,7 +511,7 @@ xfs_log_mount(xfs_mount_t *mp,
 vfsp->vfs_flag |= VFS_RDONLY;
 if (error) {
 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
- xlog_unalloc_log(mp->m_log);
+ xlog_dealloc_log(mp->m_log);
 return error;
 }
 }
@@ -667,7 +667,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 *
 * Go through the motions of sync'ing and releasing
 * the iclog, even though no I/O will actually happen,
- * we need to wait for other log I/O's that may already
+ * we need to wait for other log I/Os that may already
 * be in progress. Do this as a separate section of
 * code so we'll know if we ever get stuck here that
 * we're in this odd situation of trying to unmount
@@ -704,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount_dealloc(xfs_mount_t *mp)
 {
- xlog_unalloc_log(mp->m_log);
+ xlog_dealloc_log(mp->m_log);
 }
 
 /*
@@ -1492,7 +1492,7 @@ xlog_sync(xlog_t *log,
 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
 
- /* account for internal log which does't start at block #0 */
+ /* account for internal log which doesn't start at block #0 */
 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
 XFS_BUF_WRITE(bp);
 if ((error = XFS_bwrite(bp))) {
@@ -1506,10 +1506,10 @@ xlog_sync(xlog_t *log,
 
 
 /*
- * Unallocate a log structure
+ * Deallocate a log structure
 */
 void
- xlog_unalloc_log(xlog_t *log)
+ xlog_dealloc_log(xlog_t *log)
 {
 xlog_in_core_t *iclog, *next_iclog;
 xlog_ticket_t *tic, *next_tic;
@@ -1539,7 +1539,7 @@ xlog_unalloc_log(xlog_t *log)
 if ((log->l_ticket_cnt != log->l_ticket_tcnt) &&
 !XLOG_FORCED_SHUTDOWN(log)) {
 xfs_fs_cmn_err(CE_WARN, log->l_mp,
- "xlog_unalloc_log: (cnt: %d, total: %d)",
+ "xlog_dealloc_log: (cnt: %d, total: %d)",
 log->l_ticket_cnt, log->l_ticket_tcnt);
 /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
 
@@ -1562,7 +1562,7 @@ xlog_unalloc_log(xlog_t *log)
 #endif
 log->l_mp->m_log = NULL;
 kmem_free(log, sizeof(xlog_t));
- } /* xlog_unalloc_log */
+ } /* xlog_dealloc_log */
 
 /*
 * Update counters atomically now that memcpy is done.
@@ -2829,7 +2829,7 @@ xlog_state_release_iclog(xlog_t *log,
 
 /*
 * We let the log lock go, so it's possible that we hit a log I/O
- * error or someother SHUTDOWN condition that marks the iclog
+ * error or some other SHUTDOWN condition that marks the iclog
 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
 * this iclog has consistent data, so we ignore IOERROR
 * flags after this point.
@@ -27,7 +27,7 @@
 
 #ifdef __KERNEL__
 /*
- * By comparing each compnent, we don't have to worry about extra
+ * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
 static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
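A sketch of the component-wise comparison the comment describes, with toy macros standing in for the cycle/block accessors (the exact return-value convention is an assumption here):

#include <stdint.h>
#include <stdio.h>

typedef int64_t toy_lsn_t;

#define CYCLE_LSN(lsn)	((uint32_t)((lsn) >> 32))	/* high 32 bits */
#define BLOCK_LSN(lsn)	((uint32_t)(lsn))		/* low 32 bits */

/* Compare the two 32-bit halves separately instead of the raw 64-bit
 * values, so the comparison is indifferent to how the halves were
 * packed on disk. */
static int toy_lsn_cmp(toy_lsn_t lsn1, toy_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2) ? -1 : 1;
	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2) ? -1 : 1;
	return 0;
}

int main(void)
{
	toy_lsn_t a = ((toy_lsn_t)2 << 32) | 100;	/* cycle 2, block 100 */
	toy_lsn_t b = ((toy_lsn_t)2 << 32) | 200;	/* cycle 2, block 200 */

	printf("cmp = %d\n", toy_lsn_cmp(a, b));
	return 0;
}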
@@ -583,7 +583,7 @@ xlog_find_head(
 * x | x ... | x - 1 | x
 * Another case that fits this picture would be
 * x | x + 1 | x ... | x
- * In this case the head really is somwhere at the end of the
+ * In this case the head really is somewhere at the end of the
 * log, as one of the latest writes at the beginning was
 * incomplete.
 * One more case is
@@ -2799,7 +2799,7 @@ xlog_recover_do_trans(
 * we don't need to worry about the block number being
 * truncated in > 1 TB buffers because in user-land,
 * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so
- * the blkno's will get through the user-mode buffer
+ * the blknos will get through the user-mode buffer
 * cache properly. The only bad case is o32 kernels
 * where xfs_daddr_t is 32-bits but mount will warn us
 * off a > 1 TB filesystem before we get here.
@@ -393,7 +393,7 @@ xfs_initialize_perag(
 break;
 }
 
- /* This ag is prefered for inodes */
+ /* This ag is preferred for inodes */
 pag = &mp->m_perag[index];
 pag->pagi_inodeok = 1;
 if (index < max_metadata)
@@ -1728,7 +1728,7 @@ xfs_mount_log_sbunit(
 * We cannot use the hotcpu_register() function because it does
 * not allow notifier instances. We need a notifier per filesystem
 * as we need to be able to identify the filesystem to balance
- * the counters out. This is acheived by having a notifier block
+ * the counters out. This is achieved by having a notifier block
 * embedded in the xfs_mount_t and doing pointer magic to get the
 * mount pointer from the notifier block address.
 */
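The "pointer magic" is the classic container_of trick: recover the enclosing structure from the address of an embedded member. A self-contained sketch with toy types (the real code hangs this off xfs_mount_t):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_notifier_block { int priority; };

struct toy_mount {
	long icount;
	struct toy_notifier_block cpu_notify;	/* embedded notifier */
};

/* The callback only receives the notifier block, yet can still find
 * the filesystem it belongs to by subtracting the member offset. */
static void notifier_cb(struct toy_notifier_block *nb)
{
	struct toy_mount *mp = container_of(nb, struct toy_mount, cpu_notify);

	printf("balancing counters for mount with icount=%ld\n", mp->icount);
}

int main(void)
{
	struct toy_mount m = { .icount = 42 };

	notifier_cb(&m.cpu_notify);
	return 0;
}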
@@ -379,7 +379,7 @@ typedef struct xfs_mount {
 #endif
 int m_dalign; /* stripe unit */
 int m_swidth; /* stripe width */
- int m_sinoalign; /* stripe unit inode alignmnt */
+ int m_sinoalign; /* stripe unit inode alignment */
 int m_attr_magicpct;/* 37% of the blocksize */
 int m_dir_magicpct; /* 37% of the dir blocksize */
 __uint8_t m_mk_sharedro; /* mark shared ro on unmount */
@@ -31,7 +31,7 @@
 typedef __uint32_t xfs_dqid_t;
 
 /*
- * Eventhough users may not have quota limits occupying all 64-bits,
+ * Even though users may not have quota limits occupying all 64-bits,
 * they may need 64-bit accounting. Hence, 64-bit quota-counters,
 * and quota-limits. This is a waste in the common case, but hey ...
 */
@@ -246,7 +246,7 @@ typedef struct xfs_qoff_logformat {
 #ifdef __KERNEL__
 /*
 * This check is done typically without holding the inode lock;
- * that may seem racey, but it is harmless in the context that it is used.
+ * that may seem racy, but it is harmless in the context that it is used.
 * The inode cannot go inactive as long a reference is kept, and
 * therefore if dquot(s) were attached, they'll stay consistent.
 * If, for example, the ownership of the inode changes while
@@ -490,7 +490,7 @@ xfs_trans_mod_sb(
 case XFS_TRANS_SB_RES_FREXTENTS:
 /*
 * The allocation has already been applied to the
- * in-core superblocks's counter. This should only
+ * in-core superblock's counter. This should only
 * be applied to the on-disk superblock.
 */
 ASSERT(delta < 0);
@@ -611,7 +611,7 @@ xfs_trans_apply_sb_deltas(
 
 if (whole)
 /*
- * Log the whole thing, the fields are discontiguous.
+ * Log the whole thing, the fields are noncontiguous.
 */
 xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
 else
@@ -669,7 +669,7 @@ xfs_trans_unreserve_and_mod_sb(
 /*
 * Apply any superblock modifications to the in-core version.
 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
- * explicity NOT applied to the in-core superblock.
+ * explicitly NOT applied to the in-core superblock.
 * The idea is that that has already been done.
 */
 if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
@@ -354,7 +354,7 @@ typedef struct xfs_trans {
 xfs_lsn_t t_commit_lsn; /* log seq num of end of
 * transaction. */
 struct xfs_mount *t_mountp; /* ptr to fs mount struct */
- struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */
+ struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
 xfs_trans_callback_t t_callback; /* transaction callback */
 void *t_callarg; /* callback arg */
 unsigned int t_flags; /* misc flags */
@@ -272,7 +272,7 @@ xfs_trans_log_inode(
 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
 * routines in the eventual clearing of the ilf_fields bits.
 * See the big comment in xfs_iflush() for an explanation of
- * this coorination mechanism.
+ * this coordination mechanism.
 */
 flags |= ip->i_itemp->ili_last_fields;
 ip->i_itemp->ili_format.ilf_fields |= flags;
@@ -880,10 +880,10 @@ xfs_statvfs(
 * determine if they should be flushed sync, async, or
 * delwri.
 * SYNC_CLOSE - This flag is passed when the system is being
- * unmounted. We should sync and invalidate everthing.
+ * unmounted. We should sync and invalidate everything.
 * SYNC_FSDATA - This indicates that the caller would like to make
 * sure the superblock is safe on disk. We can ensure
- * this by simply makeing sure the log gets flushed
+ * this by simply making sure the log gets flushed
 * if SYNC_BDFLUSH is set, and by actually writing it
 * out otherwise.
 *
@@ -908,7 +908,7 @@ xfs_sync(
 *
 * This routine supports all of the flags defined for the generic VFS_SYNC
 * interface as explained above under xfs_sync. In the interests of not
- * changing interfaces within the 6.5 family, additional internallly-
+ * changing interfaces within the 6.5 family, additional internally-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 *
@@ -1090,7 +1090,7 @@ xfs_sync_inodes(
 * If this is just vfs_sync() or pflushd() calling
 * then we can skip inodes for which it looks like
 * there is nothing to do. Since we don't have the
- * inode locked this is racey, but these are periodic
+ * inode locked this is racy, but these are periodic
 * calls so it doesn't matter. For the others we want
 * to know for sure, so we at least try to lock them.
 */
@@ -1429,7 +1429,7 @@ xfs_sync_inodes(
 *
 * This routine supports all of the flags defined for the generic VFS_SYNC
 * interface as explained above under xfs_sync. In the interests of not
- * changing interfaces within the 6.5 family, additional internallly-
+ * changing interfaces within the 6.5 family, additional internally-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 *
@@ -848,7 +848,7 @@ xfs_setattr(
 * If this is a synchronous mount, make sure that the
 * transaction goes to disk before returning to the user.
 * This is slightly sub-optimal in that truncates require
- * two sync transactions instead of one for wsync filesytems.
+ * two sync transactions instead of one for wsync filesystems.
 * One for the truncate and one for the timestamps since we
 * don't want to change the timestamps unless we're sure the
 * truncate worked. Truncates are less than 1% of the laddis
@@ -1170,7 +1170,7 @@ xfs_fsync(
 
 /*
 * If this inode is on the RT dev we need to flush that
- * cache aswell.
+ * cache as well.
 */
 if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
@@ -1380,7 +1380,7 @@ xfs_inactive_symlink_rmt(
 */
 ntp = xfs_trans_dup(tp);
 /*
- * Commit the transaction containing extent freeing and EFD's.
+ * Commit the transaction containing extent freeing and EFDs.
 * If we get an error on the commit here or on the reserve below,
 * we need to unlock the inode since the new transaction doesn't
 * have the inode attached.
@@ -2023,7 +2023,7 @@ xfs_create(
 XFS_QM_DQRELE(mp, gdqp);
 
 /*
- * Propogate the fact that the vnode changed after the
+ * Propagate the fact that the vnode changed after the
 * xfs_inode locks have been released.
 */
 VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
@@ -2370,7 +2370,7 @@ xfs_remove(
 * for a log reservation. Since we'll have to wait for the
 * inactive code to complete before returning from xfs_iget,
 * we need to make sure that we don't have log space reserved
- * when we call xfs_iget. Instead we get an unlocked referece
+ * when we call xfs_iget. Instead we get an unlocked reference
 * to the inode before getting our log reservation.
 */
 error = xfs_get_dir_entry(dentry, &ip);
@@ -3020,7 +3020,7 @@ xfs_rmdir(
 * for a log reservation. Since we'll have to wait for the
 * inactive code to complete before returning from xfs_iget,
 * we need to make sure that we don't have log space reserved
- * when we call xfs_iget. Instead we get an unlocked referece
+ * when we call xfs_iget. Instead we get an unlocked reference
 * to the inode before getting our log reservation.
 */
 error = xfs_get_dir_entry(dentry, &cdp);