Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-04 04:06:26 +00:00)
sunrpc: change sp_nrthreads from atomic_t to unsigned int.
sp_nrthreads is only ever accessed under the service mutex (nlmsvc_mutex, nfs_callback_mutex, or nfsd_mutex), so there is no need for it to be an atomic_t. The fact that all code using it is single-threaded means that we can simplify svc_pool_victim and remove the temporary elevation of sp_nrthreads.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 16ef80eedc
commit 60749cbe3d
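As a rough illustration of the pattern this commit applies, here is a minimal user-space sketch (not kernel code; struct example_pool, pool_mutex, and the functions below are invented for the example): when every reader and writer of a counter already holds the same mutex, the counter can be a plain unsigned int and ordinary += / -= updates are safe, so no atomic operations are needed.

#include <pthread.h>

/* Hypothetical pool whose thread count is only ever touched while
 * pool_mutex is held, mirroring how sp_nrthreads is only accessed
 * under the service mutex (nlmsvc_mutex, nfs_callback_mutex, or
 * nfsd_mutex).
 */
struct example_pool {
    unsigned int nrthreads;    /* plain int: the mutex provides exclusion */
};

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct example_pool pool;

void example_add_thread(void)
{
    pthread_mutex_lock(&pool_mutex);
    pool.nrthreads += 1;       /* no atomic_inc() needed under the lock */
    pthread_mutex_unlock(&pool_mutex);
}

void example_remove_thread(void)
{
    pthread_mutex_lock(&pool_mutex);
    if (pool.nrthreads)
        pool.nrthreads -= 1;   /* no atomic_dec() needed either */
    pthread_mutex_unlock(&pool_mutex);
}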
@@ -1769,7 +1769,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
         struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
 
         err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
-                  atomic_read(&sp->sp_nrthreads));
+                  sp->sp_nrthreads);
         if (err)
             goto err_unlock;
     }
@@ -641,7 +641,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
 
     if (serv)
         for (i = 0; i < serv->sv_nrpools && i < n; i++)
-            nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
+            nthreads[i] = serv->sv_pools[i].sp_nrthreads;
     return 0;
 }
 
@@ -33,9 +33,9 @@
  * node traffic on multi-node NUMA NFS servers.
  */
 struct svc_pool {
-    unsigned int        sp_id;          /* pool id; also node id on NUMA */
+    unsigned int        sp_id;      /* pool id; also node id on NUMA */
     struct lwq          sp_xprts;   /* pending transports */
-    atomic_t            sp_nrthreads;   /* # of threads in pool */
+    unsigned int        sp_nrthreads;   /* # of threads in pool */
     struct list_head    sp_all_threads; /* all server threads */
     struct llist_head   sp_idle_threads; /* idle server threads */
@@ -725,7 +725,7 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
     serv->sv_nrthreads += 1;
     spin_unlock_bh(&serv->sv_lock);
 
-    atomic_inc(&pool->sp_nrthreads);
+    pool->sp_nrthreads += 1;
 
     /* Protected by whatever lock the service uses when calling
      * svc_set_num_threads()
@@ -780,31 +780,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
     struct svc_pool *pool;
     unsigned int i;
 
-retry:
     pool = target_pool;
 
-    if (pool != NULL) {
-        if (atomic_inc_not_zero(&pool->sp_nrthreads))
-            goto found_pool;
-        return NULL;
-    } else {
+    if (!pool) {
         for (i = 0; i < serv->sv_nrpools; i++) {
             pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
-            if (atomic_inc_not_zero(&pool->sp_nrthreads))
-                goto found_pool;
+            if (pool->sp_nrthreads)
+                break;
         }
-        return NULL;
     }
 
-found_pool:
-    set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-    set_bit(SP_NEED_VICTIM, &pool->sp_flags);
-    if (!atomic_dec_and_test(&pool->sp_nrthreads))
+    if (pool && pool->sp_nrthreads) {
+        set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+        set_bit(SP_NEED_VICTIM, &pool->sp_flags);
         return pool;
-    /* Nothing left in this pool any more */
-    clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
-    clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-    goto retry;
+    }
+    return NULL;
 }
 
 static int
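Read in isolation, the simplified victim selection reduces to the sketch below (hypothetical struct demo_pool and pick_victim, not the kernel source, and the explicit target-pool case is omitted): because all callers are serialized by the service mutex, a plain read of the thread count is stable, so the old retry loop and the temporary atomic_inc_not_zero()/atomic_dec_and_test() elevation can be dropped.

/* Hypothetical, simplified restatement of the new selection logic. */
struct demo_pool {
    unsigned int nrthreads;    /* only touched under one mutex */
};

struct demo_pool *pick_victim(struct demo_pool *pools, unsigned int n,
                              unsigned int *state)
{
    struct demo_pool *pool = NULL;
    unsigned int i;

    /* Rotate through the pools looking for one that still has threads. */
    for (i = 0; i < n; i++) {
        pool = &pools[--(*state) % n];
        if (pool->nrthreads)
            break;
    }

    /* The count read above cannot change behind our back, since every
     * updater holds the same mutex as the caller of this function.
     */
    if (pool && pool->nrthreads)
        return pool;
    return NULL;
}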
@@ -883,7 +874,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
     if (!pool)
         nrservs -= serv->sv_nrthreads;
     else
-        nrservs -= atomic_read(&pool->sp_nrthreads);
+        nrservs -= pool->sp_nrthreads;
 
     if (nrservs > 0)
         return svc_start_kthreads(serv, pool, nrservs);
@@ -967,7 +958,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
 
     list_del_rcu(&rqstp->rq_all);
 
-    atomic_dec(&pool->sp_nrthreads);
+    pool->sp_nrthreads -= 1;
 
     spin_lock_bh(&serv->sv_lock);
     serv->sv_nrthreads -= 1;