mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
xfs: remove a superfluous hash lookup when inserting new buffers
Currently on the slow path insert we repeat the initial hash table lookup
before we attempt the insert, resulting in two traversals of the hash table
to ensure the insert is valid. The rhashtable API provides a method for an
atomic lookup and insert operation, so we can avoid one of the hash table
traversals by using this method.

Adapted from a large patch containing this optimisation by Christoph Hellwig.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent d8d9bbb0ee
commit 32dd4f9c50
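To make the difference concrete, here is a minimal sketch of the two insertion patterns against the generic rhashtable API, not the XFS code itself: struct cache_obj, cache_params, the lock, and the function names are invented for illustration, and only the rhashtable_*() calls and their semantics come from <linux/rhashtable.h>.

#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical hashed object; stands in for struct xfs_buf in this sketch. */
struct cache_obj {
	u64			key;
	struct rhash_head	linkage;
};

static const struct rhashtable_params cache_params = {
	.key_len	= sizeof(u64),
	.key_offset	= offsetof(struct cache_obj, key),
	.head_offset	= offsetof(struct cache_obj, linkage),
};

/* Old pattern: two hash traversals, a lookup followed by a separate insert. */
static struct cache_obj *
cache_insert_two_pass(struct rhashtable *ht, spinlock_t *lock,
		struct cache_obj *new_obj)
{
	struct cache_obj	*old;

	spin_lock(lock);
	rcu_read_lock();		/* rhashtable_lookup() requires it */
	old = rhashtable_lookup(ht, &new_obj->key, cache_params);
	rcu_read_unlock();
	if (old) {
		spin_unlock(lock);
		return old;		/* existing entry wins */
	}
	/* second traversal; insert errors ignored for brevity */
	rhashtable_insert_fast(ht, &new_obj->linkage, cache_params);
	spin_unlock(lock);
	return new_obj;
}

/* New pattern: a single traversal that looks up and inserts atomically. */
static struct cache_obj *
cache_insert_one_pass(struct rhashtable *ht, spinlock_t *lock,
		struct cache_obj *new_obj)
{
	struct cache_obj	*old;

	spin_lock(lock);
	old = rhashtable_lookup_get_insert_fast(ht, &new_obj->linkage,
			cache_params);
	spin_unlock(lock);
	if (IS_ERR(old))
		return old;		/* insert failed, e.g. -ENOMEM */
	return old ? old : new_obj;	/* existing entry, or the new one */
}

Note that rhashtable_lookup_get_insert_fast() returns NULL when the new object was inserted, the already-present object when the key is taken, or an ERR_PTR() on failure; the hunks below handle exactly those three outcomes.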
@@ -623,8 +623,15 @@ xfs_buf_find_insert(
 	}
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
+	bp = rhashtable_lookup_get_insert_fast(&pag->pag_buf_hash,
+			&new_bp->b_rhash_head, xfs_buf_hash_params);
+	if (IS_ERR(bp)) {
+		error = PTR_ERR(bp);
+		spin_unlock(&pag->pag_buf_lock);
+		goto out_free_buf;
+	}
 	if (bp) {
 		/* found an existing buffer */
 		atomic_inc(&bp->b_hold);
 		spin_unlock(&pag->pag_buf_lock);
 		error = xfs_buf_find_lock(bp, flags);
@@ -635,10 +642,8 @@ xfs_buf_find_insert(
 		goto out_free_buf;
 	}
 
-	/* The buffer keeps the perag reference until it is freed. */
+	/* The new buffer keeps the perag reference until it is freed. */
 	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			       xfs_buf_hash_params);
 	spin_unlock(&pag->pag_buf_lock);
 	*bpp = new_bp;
 	return 0;
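One practical consequence, visible in the out_free_buf path above: the new object has to be allocated before the combined lookup-and-insert, so the caller must free it again whenever an existing entry (or an error) comes back. A hypothetical caller of the cache_insert_one_pass() sketch above, using kzalloc()/kfree() from <linux/slab.h> in place of the real buffer allocation path:

#include <linux/slab.h>

/* Hypothetical lookup-or-create helper built on cache_insert_one_pass(). */
static int
cache_get(struct rhashtable *ht, spinlock_t *lock, u64 key,
		struct cache_obj **out)
{
	struct cache_obj	*new_obj, *obj;

	new_obj = kzalloc(sizeof(*new_obj), GFP_KERNEL);
	if (!new_obj)
		return -ENOMEM;
	new_obj->key = key;

	obj = cache_insert_one_pass(ht, lock, new_obj);
	if (IS_ERR(obj)) {
		kfree(new_obj);		/* nothing was inserted */
		return PTR_ERR(obj);
	}
	if (obj != new_obj)
		kfree(new_obj);		/* key already present; use the existing entry */
	*out = obj;
	return 0;
}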