Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-16 13:34:30 +00:00)
cachefiles: make on-demand request distribution fairer
For now, enqueuing and dequeuing on-demand requests both start from index 0, which makes request distribution unfair: in heavily concurrent I/O scenarios, requests stored at higher indices can starve. Search for requests cyclically in cachefiles_ondemand_daemon_read() to make the distribution fairer.

Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Reported-by: Yongqing Li <liyongqing@bytedance.com>
Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220817065200.11543-1-yinxin.x@bytedance.com/ # v1
Link: https://lore.kernel.org/r/20220825020945.2293-1-yinxin.x@bytedance.com/ # v2
commit 1122f40072 (parent c93ccd63b1)
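As an illustration (indices invented for the example): with unprocessed requests sitting at indices 2 and 7 while new requests keep being enqueued at the lowest free indices, a scan that always restarts at 0 keeps returning the low-index requests, and the request at index 7 may never be served. With the cyclic scan below, once index 2 has been handed to the daemon the next scan resumes at index 3, finds 7, and only then wraps back to 0.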
@@ -111,6 +111,7 @@ struct cachefiles_cache {
 	char				*tag;		/* cache binding tag */
 	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
 	struct xarray			reqs;		/* xarray of pending on-demand requests */
+	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
 };
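The new req_id_next field acts as a resume cursor: it is set to one past the index of the request most recently handed to the daemon, so the next scan of the reqs xarray picks up where the previous one left off instead of restarting at index 0.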
@@ -242,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	unsigned long id = 0;
 	size_t n;
 	int ret = 0;
-	XA_STATE(xas, &cache->reqs, 0);
+	XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
 	/*
-	 * Search for a request that has not ever been processed, to prevent
-	 * requests from being processed repeatedly.
+	 * Cyclically search for a request that has not ever been processed,
+	 * to prevent requests from being processed repeatedly, and make
+	 * request distribution fair.
 	 */
 	xa_lock(&cache->reqs);
 	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	if (!req && cache->req_id_next > 0) {
+		xas_set(&xas, 0);
+		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
 		return 0;
@@ -264,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	}
 
 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+	cache->req_id_next = xas.xa_index + 1;
 	xa_unlock(&cache->reqs);
 
 	id = xas.xa_index;
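Put together, the patched read path amounts to the following. This is a condensed, hedged sketch rather than the exact kernel function: the helper name cachefiles_pick_req() is invented, the struct is trimmed to the two fields used here, and the mark definition is inlined as an assumption for self-containment; the xarray calls (XA_STATE(), xa_lock(), xas_find_marked(), xas_set(), xas_clear_mark()) are the real <linux/xarray.h> API used by the patch.

#include <linux/limits.h>
#include <linux/xarray.h>

/* Mark set on a request when it is enqueued (assumed value for this sketch). */
#define CACHEFILES_REQ_NEW	XA_MARK_1

struct cachefiles_cache {
	struct xarray	reqs;		/* xarray of pending on-demand requests */
	unsigned long	req_id_next;	/* where the next scan resumes */
};

/* Dequeue the next unprocessed request, scanning cyclically from req_id_next. */
static void *cachefiles_pick_req(struct cachefiles_cache *cache)
{
	void *req;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/* Pass 1: scan [req_id_next, UINT_MAX] for a still-marked request. */
	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
	if (!req && cache->req_id_next > 0) {
		/* Pass 2: wrap around and scan [0, req_id_next - 1]. */
		xas_set(&xas, 0);
		req = xas_find_marked(&xas, cache->req_id_next - 1,
				      CACHEFILES_REQ_NEW);
	}
	if (req) {
		/* Unmark the request and advance the cursor past its index. */
		xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
		cache->req_id_next = xas.xa_index + 1;
	}
	xa_unlock(&cache->reqs);
	return req;
}

xas_find_marked() leaves xas.xa_index at the entry it found, which is exactly what the cursor update needs; the two passes are disjoint and together cover the whole index space once, so no marked request can be skipped indefinitely.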
|
Loading…
x
Reference in New Issue
Block a user