mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-01 10:42:11 +00:00
ufs: avoid grabbing ->truncate_mutex if possible
tail unpacking is done in the wrong place; the deadlocks galore are best dealt with by doing that in ->write_iter() (and switching to iomap, while we are at it), but that's rather painful to backport. The trouble comes from grabbing pages that cover the beginning of tail from inside of ufs_new_fragments(); ongoing pageout of any of those is going to deadlock on ->truncate_mutex with the process that got around to extending the tail holding that and waiting for the page to get unlocked, while ->writepage() on that page is waiting on ->truncate_mutex. The thing is, we don't need ->truncate_mutex when the fragment we are trying to map is within the tail - the damn thing is allocated (tail can't contain holes). Let's do a plain lookup and if the fragment is present, we can just pretend that we'd won the race in almost all cases. The only exception is a fragment between the end of tail and the end of block containing tail. Protect ->i_lastfrag with ->meta_lock - read_seqlock_excl() is sufficient. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
parent
267309f394
commit
09bf4f5b6e
@ -423,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
|
||||
if (result) {
|
||||
ufs_clear_frags(inode, result + oldcount,
|
||||
newcount - oldcount, locked_page != NULL);
|
||||
*err = 0;
|
||||
write_seqlock(&UFS_I(inode)->meta_lock);
|
||||
ufs_cpu_to_data_ptr(sb, p, result);
|
||||
write_sequnlock(&UFS_I(inode)->meta_lock);
|
||||
*err = 0;
|
||||
UFS_I(inode)->i_lastfrag =
|
||||
max(UFS_I(inode)->i_lastfrag, fragment + count);
|
||||
write_sequnlock(&UFS_I(inode)->meta_lock);
|
||||
}
|
||||
mutex_unlock(&UFS_SB(sb)->s_lock);
|
||||
UFSD("EXIT, result %llu\n", (unsigned long long)result);
|
||||
@ -441,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
|
||||
result = ufs_add_fragments(inode, tmp, oldcount, newcount);
|
||||
if (result) {
|
||||
*err = 0;
|
||||
read_seqlock_excl(&UFS_I(inode)->meta_lock);
|
||||
UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
|
||||
fragment + count);
|
||||
read_sequnlock_excl(&UFS_I(inode)->meta_lock);
|
||||
ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
|
||||
locked_page != NULL);
|
||||
mutex_unlock(&UFS_SB(sb)->s_lock);
|
||||
@ -479,12 +481,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
|
||||
ufs_change_blocknr(inode, fragment - oldcount, oldcount,
|
||||
uspi->s_sbbase + tmp,
|
||||
uspi->s_sbbase + result, locked_page);
|
||||
*err = 0;
|
||||
write_seqlock(&UFS_I(inode)->meta_lock);
|
||||
ufs_cpu_to_data_ptr(sb, p, result);
|
||||
write_sequnlock(&UFS_I(inode)->meta_lock);
|
||||
*err = 0;
|
||||
UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
|
||||
fragment + count);
|
||||
write_sequnlock(&UFS_I(inode)->meta_lock);
|
||||
mutex_unlock(&UFS_SB(sb)->s_lock);
|
||||
if (newcount < request)
|
||||
ufs_free_fragments (inode, result + newcount, request - newcount);
|
||||
|
@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
|
||||
u64 phys64 = 0;
|
||||
unsigned frag = fragment & uspi->s_fpbmask;
|
||||
|
||||
if (!create) {
|
||||
phys64 = ufs_frag_map(inode, offsets, depth);
|
||||
if (phys64)
|
||||
map_bh(bh_result, sb, phys64 + frag);
|
||||
return 0;
|
||||
}
|
||||
phys64 = ufs_frag_map(inode, offsets, depth);
|
||||
if (!create)
|
||||
goto done;
|
||||
|
||||
if (phys64) {
|
||||
if (fragment >= UFS_NDIR_FRAGMENT)
|
||||
goto done;
|
||||
read_seqlock_excl(&UFS_I(inode)->meta_lock);
|
||||
if (fragment < UFS_I(inode)->i_lastfrag) {
|
||||
read_sequnlock_excl(&UFS_I(inode)->meta_lock);
|
||||
goto done;
|
||||
}
|
||||
read_sequnlock_excl(&UFS_I(inode)->meta_lock);
|
||||
}
|
||||
/* This code entered only while writing ....? */
|
||||
|
||||
mutex_lock(&UFS_I(inode)->truncate_mutex);
|
||||
@ -451,6 +458,11 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
|
||||
}
|
||||
mutex_unlock(&UFS_I(inode)->truncate_mutex);
|
||||
return err;
|
||||
|
||||
done:
|
||||
if (phys64)
|
||||
map_bh(bh_result, sb, phys64 + frag);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
|
||||
@ -1161,7 +1173,9 @@ static void ufs_truncate_blocks(struct inode *inode)
|
||||
free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
|
||||
}
|
||||
}
|
||||
read_seqlock_excl(&ufsi->meta_lock);
|
||||
ufsi->i_lastfrag = DIRECT_FRAGMENT;
|
||||
read_sequnlock_excl(&ufsi->meta_lock);
|
||||
mark_inode_dirty(inode);
|
||||
mutex_unlock(&ufsi->truncate_mutex);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user