netfs: Use bh-disabling spinlocks for rreq->lock

Use bh-disabling spinlocks when accessing rreq->lock because, in the
future, it may be twiddled from softirq context when cleanup is driven from
cache backend DIO completion.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-12-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
David Howells 2024-07-12 11:54:11 +01:00 committed by Christian Brauner
parent 24c90a79f6
commit 22de489d1e
No known key found for this signature in database
GPG Key ID: 91C61BC06578DCA2
2 changed files with 4 additions and 4 deletions

View File

@@ -473,7 +473,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
cancel:
/* Remove if completely consumed. */
-		spin_lock(&wreq->lock);
+		spin_lock_bh(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
@@ -489,7 +489,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
}
}
-		spin_unlock(&wreq->lock);
+		spin_unlock_bh(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :

View File

@@ -191,7 +191,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
-	spin_lock(&wreq->lock);
+	spin_lock_bh(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -202,7 +202,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
-	spin_unlock(&wreq->lock);
+	spin_unlock_bh(&wreq->lock);
stream->construct = subreq;
}