SUNRPC: Fix races with rpc_killall_tasks()
Ensure that we immediately call rpc_exit_task() after waking up, and that
the tk_rpc_status cannot get clobbered by some other function.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
This commit is contained in:
parent e4266f23ec
commit 39494194f9
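To illustrate the "cannot get clobbered" part of the description, here is a minimal userspace sketch using C11 atomics. The names task_status and record_status_once() are invented for this example and are not kernel APIs; the kernel helper added below gets the same behaviour from cmpxchg() on tk_rpc_status. With plain stores, a later error path can overwrite a status an earlier path already recorded; with a compare-exchange, only the first writer wins.

/*
 * Sketch only: userspace illustration of "record the status once".
 * task_status and record_status_once() are invented names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int task_status;         /* 0 means "no error recorded yet" */

/* The first caller to record a non-zero status wins; later calls are no-ops. */
static bool record_status_once(int status)
{
        int expected = 0;

        return atomic_compare_exchange_strong(&task_status, &expected, status);
}

int main(void)
{
        /* Two error paths race to record a status for the same task. */
        bool first = record_status_once(-512);  /* e.g. -ERESTARTSYS */
        bool second = record_status_once(-107); /* e.g. -ENOTCONN */

        /* Exactly one caller succeeds, and the value never changes afterwards. */
        printf("first=%d second=%d status=%d\n",
               first, second, atomic_load(&task_status));
        return 0;
}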
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -209,6 +209,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
 void rpc_put_task(struct rpc_task *);
 void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
 void rpc_signal_task(struct rpc_task *);
 void rpc_exit_task(struct rpc_task *);
 void rpc_exit(struct rpc_task *, int);
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1642,7 +1642,7 @@ static void
 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
 {
         trace_rpc_call_rpcerror(task, tk_status, rpc_status);
-        task->tk_rpc_status = rpc_status;
+        rpc_task_set_rpc_status(task, rpc_status);
         rpc_exit(task, tk_status);
 }
 
@@ -2435,10 +2435,8 @@ rpc_check_timeout(struct rpc_task *task)
 {
         struct rpc_clnt *clnt = task->tk_client;
 
-        if (RPC_SIGNALLED(task)) {
-                rpc_call_rpcerror(task, -ERESTARTSYS);
+        if (RPC_SIGNALLED(task))
                 return;
-        }
 
         if (xprt_adjust_timeout(task->tk_rqstp) == 0)
                 return;
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -65,6 +65,13 @@ gfp_t rpc_task_gfp_mask(void)
 }
 EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
 
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
+{
+        if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
+                return true;
+        return false;
+}
+
 unsigned long
 rpc_task_timeout(const struct rpc_task *task)
 {
@@ -855,12 +862,14 @@ void rpc_signal_task(struct rpc_task *task)
         if (!RPC_IS_ACTIVATED(task))
                 return;
 
+        if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+                return;
         trace_rpc_task_signalled(task, task->tk_action);
         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
         smp_mb__after_atomic();
         queue = READ_ONCE(task->tk_waitqueue);
         if (queue)
-                rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+                rpc_wake_up_queued_task(queue, task);
 }
 
 void rpc_exit(struct rpc_task *task, int status)
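A sketch of the "claim, then wake" shape this hunk gives rpc_signal_task(): only the caller that wins the set-once update on the status goes on to mark the task and wake it, so concurrent rpc_killall_tasks() calls cannot re-signal the task or clobber a status recorded elsewhere. This is a userspace analogue with POSIX threads; struct worker_state, signal_worker() and the other names are invented for illustration, not kernel code.

/* Sketch only: claim the status first, wake the worker only if we won. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct worker_state {
        _Atomic int     status;     /* 0 = no error recorded yet */
        pthread_mutex_t lock;
        pthread_cond_t  wake;
        bool            sleeping;   /* worker is waiting to be woken */
};

static bool record_status_once(struct worker_state *w, int status)
{
        int expected = 0;

        return atomic_compare_exchange_strong(&w->status, &expected, status);
}

/*
 * Analogue of the reworked rpc_signal_task(): only the caller that wins the
 * set-once update wakes the worker; later callers return early, so the
 * recorded status is never overwritten and the wakeup happens once.
 */
static void signal_worker(struct worker_state *w, int status)
{
        if (!record_status_once(w, status))
                return;
        pthread_mutex_lock(&w->lock);
        if (w->sleeping) {
                w->sleeping = false;
                pthread_cond_signal(&w->wake);
        }
        pthread_mutex_unlock(&w->lock);
}

static void *worker(void *arg)
{
        struct worker_state *w = arg;

        pthread_mutex_lock(&w->lock);
        w->sleeping = true;
        while (w->sleeping && atomic_load(&w->status) == 0)
                pthread_cond_wait(&w->wake, &w->lock);
        w->sleeping = false;
        pthread_mutex_unlock(&w->lock);

        printf("worker exits with status %d\n", atomic_load(&w->status));
        return NULL;
}

int main(void)
{
        struct worker_state w = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wake = PTHREAD_COND_INITIALIZER,
        };
        pthread_t t;

        pthread_create(&t, NULL, worker, &w);
        signal_worker(&w, -512);    /* the first "kill" wins ...   */
        signal_worker(&w, -107);    /* ... later ones are no-ops   */
        pthread_join(t, NULL);
        return 0;
}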
@@ -907,10 +916,16 @@ static void __rpc_execute(struct rpc_task *task)
                  * Perform the next FSM step or a pending callback.
                  *
                  * tk_action may be NULL if the task has been killed.
-                 * In particular, note that rpc_killall_tasks may
-                 * do this at any time, so beware when dereferencing.
                  */
                 do_action = task->tk_action;
+                /* Tasks with an RPC error status should exit */
+                if (do_action != rpc_exit_task &&
+                    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
+                        task->tk_status = status;
+                        if (do_action != NULL)
+                                do_action = rpc_exit_task;
+                }
+                /* Callbacks override all actions */
                 if (task->tk_callback) {
                         do_action = task->tk_callback;
                         task->tk_callback = NULL;
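The check added above is what makes a killed task exit as soon as it is woken: every pass through the state-machine loop now consults the recorded status before dispatching the next action and diverts to the exit action if one is set. A toy sketch of that control flow, with invented names (toy_task, step_one, exit_action); it is not the kernel's struct rpc_task or __rpc_execute():

/* Sketch only: an action-based state machine that diverts to its exit
 * action once an error status has been recorded. */
#include <stdatomic.h>
#include <stdio.h>

struct toy_task;
typedef void (*action_fn)(struct toy_task *);

struct toy_task {
        action_fn   next_action;
        _Atomic int rpc_status;     /* set once by a "killer" */
        int         status;
};

static void exit_action(struct toy_task *t)
{
        printf("exiting, status %d\n", t->status);
        t->next_action = NULL;
}

static void step_two(struct toy_task *t)
{
        printf("step two\n");
        t->status = 0;
        t->next_action = exit_action;
}

static void step_one(struct toy_task *t)
{
        printf("step one\n");
        t->next_action = step_two;
}

static void execute(struct toy_task *t)
{
        while (t->next_action) {
                action_fn do_action = t->next_action;
                int status = atomic_load(&t->rpc_status);

                /* A recorded error diverts the task to its exit action. */
                if (do_action != exit_action && status != 0) {
                        t->status = status;
                        do_action = exit_action;
                }
                do_action(t);
        }
}

int main(void)
{
        struct toy_task t = { .next_action = step_one };

        atomic_store(&t.rpc_status, -512);  /* pretend the task was killed */
        execute(&t);                        /* jumps straight to exit_action */
        return 0;
}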
@@ -932,14 +947,6 @@ static void __rpc_execute(struct rpc_task *task)
                         continue;
                 }
 
-                /*
-                 * Signalled tasks should exit rather than sleep.
-                 */
-                if (RPC_SIGNALLED(task)) {
-                        task->tk_rpc_status = -ERESTARTSYS;
-                        rpc_exit(task, -ERESTARTSYS);
-                }
-
                 /*
                  * The queue->lock protects against races with
                  * rpc_make_runnable().
@@ -955,6 +962,12 @@ static void __rpc_execute(struct rpc_task *task)
                         spin_unlock(&queue->lock);
                         continue;
                 }
+                /* Wake up any task that has an exit status */
+                if (READ_ONCE(task->tk_rpc_status) != 0) {
+                        rpc_wake_up_task_queue_locked(queue, task);
+                        spin_unlock(&queue->lock);
+                        continue;
+                }
                 rpc_clear_running(task);
                 spin_unlock(&queue->lock);
                 if (task_is_async)
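The new tk_rpc_status test sits under queue->lock because a killer can record a status and attempt its wakeup in the window between the task deciding to sleep and actually clearing RUNNING; re-checking under the same lock the waker takes closes that window. A userspace analogue of "re-check the exit condition under the lock before blocking", with POSIX threads and invented names (wait_queue, wake_with_status, sleep_unless_status):

/* Sketch only: the sleeper re-checks the exit status under the waker's
 * lock, so a status recorded just before sleeping is never missed. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wait_queue {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             exit_status;    /* 0 = keep running */
        bool            queued;
};

/* Waker: record the status and wake any sleeper under the queue lock. */
static void wake_with_status(struct wait_queue *q, int status)
{
        pthread_mutex_lock(&q->lock);
        if (q->exit_status == 0)
                q->exit_status = status;
        q->queued = false;
        pthread_cond_signal(&q->cond);
        pthread_mutex_unlock(&q->lock);
}

/* Sleeper: re-check the exit status under the lock before blocking. */
static int sleep_unless_status(struct wait_queue *q)
{
        int status;

        pthread_mutex_lock(&q->lock);
        if (q->exit_status != 0) {      /* analogous to the added check */
                status = q->exit_status;
                pthread_mutex_unlock(&q->lock);
                return status;
        }
        q->queued = true;
        while (q->queued)
                pthread_cond_wait(&q->cond, &q->lock);
        status = q->exit_status;
        pthread_mutex_unlock(&q->lock);
        return status;
}

int main(void)
{
        struct wait_queue q = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };

        /* A status recorded just before sleeping must not be missed. */
        wake_with_status(&q, -512);
        printf("sleeper saw status %d\n", sleep_unless_status(&q));
        return 0;
}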
@@ -972,10 +985,7 @@ static void __rpc_execute(struct rpc_task *task)
                          * clean up after sleeping on some queue, we don't
                          * break the loop here, but go around once more.
                          */
-                        trace_rpc_task_signalled(task, task->tk_action);
-                        set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
-                        task->tk_rpc_status = -ERESTARTSYS;
-                        rpc_exit(task, -ERESTARTSYS);
+                        rpc_signal_task(task);
                 }
                 trace_rpc_task_sync_wake(task, task->tk_action);
         }
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1978,8 +1978,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
                  * we'll need to figure out how to pass a namespace to
                  * connect.
                  */
-                task->tk_rpc_status = -ENOTCONN;
-                rpc_exit(task, -ENOTCONN);
+                rpc_task_set_rpc_status(task, -ENOTCONN);
                 goto out_wake;
         }
         ret = xs_local_setup_socket(transport);