blk-cgroup: replace bio_blkcg with bio_blkcg_css
All callers of bio_blkcg actually want the CSS, so replace it with an
interface that returns the CSS directly. This now allows moving struct
blkcg_gq to block/blk-cgroup.h instead of exposing it in a public header.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220420042723.1010598-10-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
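For illustration, the caller-side effect of the change looks like this; a
minimal sketch in which report_bio_cgroup() is a hypothetical caller, not
part of the patch:

	/*
	 * Before: bio_blkcg() returned a struct blkcg *, so callers that
	 * wanted the CSS had to reach through it themselves:
	 *
	 *	css = &bio_blkcg(bio)->css;
	 *
	 * After: bio_blkcg_css() returns the CSS directly (or NULL), so
	 * struct blkcg no longer has to be visible outside the block layer.
	 */
	static u64 report_bio_cgroup(struct bio *bio)
	{
		struct cgroup_subsys_state *css = bio_blkcg_css(bio);

		if (!css)	/* bio not associated with a blkcg */
			return 0;
		return cgroup_id(css->cgroup);
	}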
parent f4a6a61cb6
commit bbb1ebe7a9
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -155,6 +155,22 @@ static void blkg_async_bio_workfn(struct work_struct *work)
 	blk_finish_plug(&plug);
 }
 
+/**
+ * bio_blkcg_css - return the blkcg CSS associated with a bio
+ * @bio: target bio
+ *
+ * This returns the CSS for the blkcg associated with a bio, or %NULL if not
+ * associated. Callers are expected to either handle %NULL or know association
+ * has been done prior to calling this.
+ */
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+	if (!bio || !bio->bi_blkg)
+		return NULL;
+	return &bio->bi_blkg->blkcg->css;
+}
+EXPORT_SYMBOL_GPL(bio_blkcg_css);
+
 /**
  * blkcg_parent - get the parent of a blkcg
  * @blkcg: blkcg of interest
@@ -1938,7 +1954,7 @@ void bio_associate_blkg(struct bio *bio)
 	rcu_read_lock();
 
 	if (bio->bi_blkg)
-		css = &bio_blkcg(bio)->css;
+		css = bio_blkcg_css(bio);
 	else
 		css = blkcg_css();
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -25,6 +25,64 @@ struct blkg_policy_data;
 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
 
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_iostat_type {
+	BLKG_IOSTAT_READ,
+	BLKG_IOSTAT_WRITE,
+	BLKG_IOSTAT_DISCARD,
+
+	BLKG_IOSTAT_NR,
+};
+
+struct blkg_iostat {
+	u64		bytes[BLKG_IOSTAT_NR];
+	u64		ios[BLKG_IOSTAT_NR];
+};
+
+struct blkg_iostat_set {
+	struct u64_stats_sync	sync;
+	struct blkg_iostat	cur;
+	struct blkg_iostat	last;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+	/* Pointer to the associated request_queue */
+	struct request_queue	*q;
+	struct list_head	q_node;
+	struct hlist_node	blkcg_node;
+	struct blkcg		*blkcg;
+
+	/* all non-root blkcg_gq's are guaranteed to have access to parent */
+	struct blkcg_gq		*parent;
+
+	/* reference count */
+	struct percpu_ref	refcnt;
+
+	/* is this blkg online? protected by both blkcg and q locks */
+	bool			online;
+
+	struct blkg_iostat_set __percpu	*iostat_cpu;
+	struct blkg_iostat_set	iostat;
+
+	struct blkg_policy_data	*pd[BLKCG_MAX_POLS];
+
+	spinlock_t		async_bio_lock;
+	struct bio_list		async_bios;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
+
+	atomic_t		use_delay;
+	atomic64_t		delay_nsec;
+	atomic64_t		delay_start;
+	u64			last_delay;
+	int			last_use;
+
+	struct rcu_head		rcu_head;
+};
+
 struct blkcg {
 	struct cgroup_subsys_state	css;
 	spinlock_t			lock;
@@ -173,9 +231,9 @@ static inline struct cgroup_subsys_state *blkcg_css(void)
  *
  * In order to avoid priority inversions we sometimes need to issue a bio as if
  * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
+ * blkg. The idea is we do bio_blkcg_css() to look up the actual context for
+ * the bio and attach the appropriate blkg to the bio. Then we call this helper
+ * and if it is true run with the root blkg for that queue and then do any
  * backcharging to the originating cgroup once the io is complete.
  */
 static inline bool bio_issue_as_root_blkg(struct bio *bio)
@@ -464,6 +522,9 @@ struct blkcg_policy_data {
 struct blkcg_policy {
 };
 
+struct blkcg {
+};
+
 #ifdef CONFIG_BLOCK
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1829,12 +1829,14 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->blkcg_css = NULL;
 	cmd->memcg_css = NULL;
 #ifdef CONFIG_BLK_CGROUP
-	if (rq->bio && rq->bio->bi_blkg) {
-		cmd->blkcg_css = &bio_blkcg(rq->bio)->css;
+	if (rq->bio) {
+		cmd->blkcg_css = bio_blkcg_css(rq->bio);
 #ifdef CONFIG_MEMCG
-		cmd->memcg_css =
-			cgroup_get_e_css(cmd->blkcg_css->cgroup,
-					&memory_cgrp_subsys);
+		if (cmd->blkcg_css) {
+			cmd->memcg_css =
+				cgroup_get_e_css(cmd->blkcg_css->cgroup,
+						&memory_cgrp_subsys);
+		}
 #endif
 	}
 #endif
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -28,94 +28,18 @@
 #define FC_APPID_LEN	129
 
 #ifdef CONFIG_BLK_CGROUP
 
-enum blkg_iostat_type {
-	BLKG_IOSTAT_READ,
-	BLKG_IOSTAT_WRITE,
-	BLKG_IOSTAT_DISCARD,
-
-	BLKG_IOSTAT_NR,
-};
-
-struct blkg_iostat {
-	u64		bytes[BLKG_IOSTAT_NR];
-	u64		ios[BLKG_IOSTAT_NR];
-};
-
-struct blkg_iostat_set {
-	struct u64_stats_sync	sync;
-	struct blkg_iostat	cur;
-	struct blkg_iostat	last;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
-	/* Pointer to the associated request_queue */
-	struct request_queue	*q;
-	struct list_head	q_node;
-	struct hlist_node	blkcg_node;
-	struct blkcg		*blkcg;
-
-	/* all non-root blkcg_gq's are guaranteed to have access to parent */
-	struct blkcg_gq		*parent;
-
-	/* reference count */
-	struct percpu_ref	refcnt;
-
-	/* is this blkg online? protected by both blkcg and q locks */
-	bool			online;
-
-	struct blkg_iostat_set __percpu	*iostat_cpu;
-	struct blkg_iostat_set	iostat;
-
-	struct blkg_policy_data	*pd[BLKCG_MAX_POLS];
-
-	spinlock_t		async_bio_lock;
-	struct bio_list		async_bios;
-	union {
-		struct work_struct	async_bio_work;
-		struct work_struct	free_work;
-	};
-
-	atomic_t		use_delay;
-	atomic64_t		delay_nsec;
-	atomic64_t		delay_start;
-	u64			last_delay;
-	int			last_use;
-
-	struct rcu_head		rcu_head;
-};
-
 extern struct cgroup_subsys_state * const blkcg_root_css;
 
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
 
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, %NULL if not associated.
- * Callers are expected to either handle %NULL or know association has been
- * done prior to calling this.
- */
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
-	if (bio && bio->bi_blkg)
-		return bio->bi_blkg->blkcg;
-	return NULL;
-}
-
 bool blk_cgroup_congested(void);
 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
 void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
 struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
 
 #else	/* CONFIG_BLK_CGROUP */
 
-struct blkcg_gq {
-};
-
 #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
 
 static inline void blkcg_maybe_throttle_current(void) { }
@@ -123,7 +47,10 @@ static inline bool blk_cgroup_congested(void) { return false; }
 
 #ifdef CONFIG_BLOCK
 static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+	return NULL;
+}
 #endif /* CONFIG_BLOCK */
 
 #endif /* CONFIG_BLK_CGROUP */
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -783,6 +783,7 @@ void blk_trace_shutdown(struct request_queue *q)
 #ifdef CONFIG_BLK_CGROUP
 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 {
+	struct cgroup_subsys_state *blkcg_css;
 	struct blk_trace *bt;
 
 	/* We don't use the 'bt' value here except as an optimization... */
@@ -790,9 +791,10 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 		return 0;
 
-	if (!bio->bi_blkg)
+	blkcg_css = bio_blkcg_css(bio);
+	if (!blkcg_css)
 		return 0;
-	return cgroup_id(bio_blkcg(bio)->css.cgroup);
+	return cgroup_id(blkcg_css->cgroup);
 }
 #else
 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
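A note on the loop.c hunk above: bio_blkcg_css() folds the old
rq->bio->bi_blkg check into the helper and may return NULL, so callers must
now guard any dereference of the result, which is why loop_queue_rq() gains
a NULL check before cgroup_get_e_css(). A minimal sketch of the pattern,
with a hypothetical charge_owning_cgroup() caller:

	static void charge_owning_cgroup(struct bio *bio)
	{
		struct cgroup_subsys_state *css = bio_blkcg_css(bio);

		if (!css)
			return;	/* bio was never associated with a blkcg */
		/* css->cgroup is safe to use here, e.g. with cgroup_get_e_css() */
	}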