Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-15 02:05:33 +00:00)
xfs: kill xfs_bioerror_relse
There is only one caller now - xfs_trans_read_buf_map() - and it has very well defined call semantics - read, synchronous, and b_iodone is NULL. Hence it's pretty clear what error handling is necessary for this case. The bigger problem of untangling xfs_trans_read_buf_map error handling is left to a future patch.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
commit 8b131973d1
parent 2718775469
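For context, here is a minimal standalone sketch (not the upstream kernel code) of the error-handling pattern that the xfs_trans_read_buf_map() hunk below open-codes: on a forced shutdown, the pending read is abandoned by clearing the read/done state, marking the buffer stale with an EIO error, releasing it, and returning -EIO to the caller. All types and helpers in the sketch (toy_buf, toy_buf_ioerror, and friends) are simplified stand-ins for illustration, not the real xfs_buf API.

#include <stdio.h>
#include <errno.h>

#define XBF_READ	(1u << 0)
#define XBF_DONE	(1u << 1)
#define XBF_STALE	(1u << 2)

/* Simplified stand-in for struct xfs_buf. */
struct toy_buf {
	unsigned int	flags;
	int		error;		/* negative errno, like bp->b_error */
};

static void toy_buf_ioerror(struct toy_buf *bp, int error)
{
	bp->error = error;
}

static void toy_buf_stale(struct toy_buf *bp)
{
	bp->flags |= XBF_STALE;
}

static void toy_buf_relse(struct toy_buf *bp)
{
	/* In the kernel this would unlock the buffer and drop a reference. */
	(void)bp;
}

/*
 * Shutdown handling for a synchronous read with no completion callback:
 * the error is reported directly to the caller via the return value, so
 * no helper like xfs_bioerror_relse() is needed.
 */
static int toy_read_buf(struct toy_buf *bp, int forced_shutdown)
{
	if (forced_shutdown) {
		bp->flags &= ~(XBF_READ | XBF_DONE);
		toy_buf_ioerror(bp, -EIO);
		toy_buf_stale(bp);
		toy_buf_relse(bp);
		return -EIO;
	}
	/* ...otherwise submit the read and wait for completion here... */
	return 0;
}

int main(void)
{
	struct toy_buf bp = { .flags = XBF_READ };

	printf("shutdown path returns %d\n", toy_read_buf(&bp, 1));
	return 0;
}

Because the only remaining caller issues a synchronous read with b_iodone set to NULL, there is no completion callback to preserve, which is why the ASYNC and iodone handling in the old helper can simply be dropped.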
fs/xfs/xfs_buf.c
@@ -1074,45 +1074,6 @@ xfs_buf_ioerror_alert(
 		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
 }
 
-/*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the xfs_buf_ioend call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
-int
-xfs_bioerror_relse(
-	struct xfs_buf	*bp)
-{
-	int64_t		fl = bp->b_flags;
-	/*
-	 * No need to wait until the buffer is unpinned.
-	 * We aren't flushing it.
-	 *
-	 * chunkhold expects B_DONE to be set, whether
-	 * we actually finish the I/O or not. We don't want to
-	 * change that interface.
-	 */
-	XFS_BUF_UNREAD(bp);
-	XFS_BUF_DONE(bp);
-	xfs_buf_stale(bp);
-	bp->b_iodone = NULL;
-	if (!(fl & XBF_ASYNC)) {
-		/*
-		 * Mark b_error and B_ERROR _both_.
-		 * Lot's of chunkcache code assumes that.
-		 * There's no reason to mark error for
-		 * ASYNC buffers.
-		 */
-		xfs_buf_ioerror(bp, -EIO);
-		complete(&bp->b_iowait);
-	} else {
-		xfs_buf_relse(bp);
-	}
-
-	return -EIO;
-}
-
 int
 xfs_bwrite(
 	struct xfs_buf		*bp)
fs/xfs/xfs_buf.h
@@ -297,8 +297,6 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
 #define xfs_buf_zero(bp, off, len) \
 	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
 
-extern int xfs_bioerror_relse(struct xfs_buf *);
-
 /* Buffer Utility Routines */
 extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
 
fs/xfs/xfs_trans_buf.c
@@ -324,11 +324,14 @@ xfs_trans_read_buf_map(
 	 */
 	if (XFS_FORCED_SHUTDOWN(mp)) {
 		trace_xfs_bdstrat_shut(bp, _RET_IP_);
-		xfs_bioerror_relse(bp);
-	} else {
-		xfs_buf_iorequest(bp);
+		bp->b_flags &= ~(XBF_READ | XBF_DONE);
+		xfs_buf_ioerror(bp, -EIO);
+		xfs_buf_stale(bp);
+		xfs_buf_relse(bp);
+		return -EIO;
 	}
 
+	xfs_buf_iorequest(bp);
 	error = xfs_buf_iowait(bp);
 	if (error) {
 		xfs_buf_ioerror_alert(bp, __func__);