mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
blkcg: change blkg reference counting to use percpu_ref
Every bio is now associated with a blkg putting blkg_get, blkg_try_get, and blkg_put on the hot path. Switch over the refcnt in blkg to use percpu_ref. Signed-off-by: Dennis Zhou <dennis@kernel.org> Acked-by: Tejun Heo <tj@kernel.org> Reviewed-by: Josef Bacik <josef@toxicpanda.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
6f70fb6618
commit
7fcf2b033b
@@ -81,6 +81,37 @@ static void blkg_free(struct blkcg_gq *blkg)
 	kfree(blkg);
 }
 
+static void __blkg_release(struct rcu_head *rcu)
+{
+	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+	percpu_ref_exit(&blkg->refcnt);
+
+	/* release the blkcg and parent blkg refs this blkg has been holding */
+	css_put(&blkg->blkcg->css);
+	if (blkg->parent)
+		blkg_put(blkg->parent);
+
+	wb_congested_put(blkg->wb_congested);
+
+	blkg_free(blkg);
+}
+
+/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid. For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+	call_rcu(&blkg->rcu_head, __blkg_release);
+}
+
 /**
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
@@ -107,7 +138,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 	blkg->q = q;
 	INIT_LIST_HEAD(&blkg->q_node);
 	blkg->blkcg = blkcg;
-	atomic_set(&blkg->refcnt, 1);
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 		struct blkcg_policy *pol = blkcg_policy[i];
@@ -207,6 +237,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		blkg_get(blkg->parent);
 	}
 
+	ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
+			      GFP_NOWAIT | __GFP_NOWARN);
+	if (ret)
+		goto err_cancel_ref;
+
 	/* invoke per-policy init */
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 		struct blkcg_policy *pol = blkcg_policy[i];
@@ -239,6 +274,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	blkg_put(blkg);
 	return ERR_PTR(ret);
 
+err_cancel_ref:
+	percpu_ref_exit(&blkg->refcnt);
 err_put_congested:
 	wb_congested_put(wb_congested);
 err_put_css:
@@ -367,7 +404,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
 	 */
-	blkg_put(blkg);
+	percpu_ref_kill(&blkg->refcnt);
 }
 
 /**
@@ -124,7 +124,7 @@ struct blkcg_gq {
 	struct blkcg_gq			*parent;
 
 	/* reference count */
-	atomic_t			refcnt;
+	struct percpu_ref		refcnt;
 
 	/* is this blkg online? protected by both blkcg and q locks */
 	bool				online;
@@ -487,8 +487,7 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-	atomic_inc(&blkg->refcnt);
+	percpu_ref_get(&blkg->refcnt);
 }
 
 /**
@@ -500,7 +499,7 @@ static inline void blkg_get(struct blkcg_gq *blkg)
  */
 static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
 {
-	if (atomic_inc_not_zero(&blkg->refcnt))
+	if (percpu_ref_tryget(&blkg->refcnt))
 		return blkg;
 	return NULL;
 }
@@ -514,23 +513,19 @@ static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
  */
 static inline struct blkcg_gq *blkg_try_get_closest(struct blkcg_gq *blkg)
 {
-	while (!atomic_inc_not_zero(&blkg->refcnt))
+	while (!percpu_ref_tryget(&blkg->refcnt))
 		blkg = blkg->parent;
 
 	return blkg;
 }
 
-void __blkg_release_rcu(struct rcu_head *rcu);
-
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-	if (atomic_dec_and_test(&blkg->refcnt))
-		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+	percpu_ref_put(&blkg->refcnt);
 }
 
 /**
Loading…
Reference in New Issue
Block a user