Revert "blk-cgroup: move the cgroup information to struct gendisk"
This reverts commit 3f13ab7c80
as a patch
it depends on caused a few problems.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230214183308.1658775-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 1231039db3
parent 2f1e07dda1
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -999,7 +999,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
 {
 	struct blkcg_gq *blkg;
 
-	list_for_each_entry(blkg, &bfqd->queue->disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
 		bfq_end_wr_async_queues(bfqd, bfqg);
@@ -1293,7 +1293,7 @@ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 	if (ret)
 		return NULL;
 
-	return blkg_to_bfqg(bfqd->queue->disk->root_blkg);
+	return blkg_to_bfqg(bfqd->queue->root_blkg);
 }
 
 struct blkcg_policy blkcg_policy_bfq = {
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -108,10 +108,10 @@ static struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
 		const struct blkcg_policy *pol)
 {
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
 static void blkg_free_workfn(struct work_struct *work)
@@ -123,18 +123,18 @@ static void blkg_free_workfn(struct work_struct *work)
 	/*
 	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
 	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);
 
 	put_disk(blkg->disk);
 	free_percpu(blkg->iostat_cpu);
@@ -269,7 +269,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	get_device(disk_to_dev(disk));
 	blkg->disk = disk;
 
-	INIT_LIST_HEAD(&blkg->entry);
+	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +285,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
@@ -371,7 +371,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 	if (likely(!ret)) {
 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);
 
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -444,7 +444,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	while (true) {
 		struct blkcg *pos = blkcg;
 		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;
 
 		while (parent) {
 			blkg = blkg_lookup(parent, disk);
@@ -526,7 +526,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -545,7 +545,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 		}
 	}
 
-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }
 
@@ -620,7 +620,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
@@ -728,7 +728,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		ret = -EOPNOTSUPP;
 		goto fail_unlock;
 	}
@@ -771,7 +771,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
-		if (!blkcg_policy_enabled(disk, pol)) {
+		if (!blkcg_policy_enabled(q, pol)) {
 			blkg_free(new_blkg);
 			ret = -EOPNOTSUPP;
 			goto fail_preloaded;
@@ -951,7 +951,7 @@ static void blkcg_fill_root_iostats(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 		struct blkg_iostat tmp;
 		int cpu;
 		unsigned long flags;
@@ -1298,8 +1298,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1313,7 +1313,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
 	spin_unlock_irq(&q->queue_lock);
 
 	if (preloaded)
@@ -1426,7 +1426,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
 	int ret;
 
-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
 	if (queue_is_mq(q))
@@ -1435,7 +1435,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	spin_lock_irq(&q->queue_lock);
 
 	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
 
 		if (blkg->pd[pol->plid])
@@ -1480,16 +1480,16 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 
 	/* all allocated, init in the same order */
 	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		if (pol->pd_online_fn)
 			pol->pd_online_fn(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid]->online = true;
 	}
 
-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
 	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1505,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1535,18 +1535,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
 
-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);
 
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1560,7 +1560,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
@@ -1957,7 +1957,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
  * Associate @bio with the blkg found by combining the css's blkg and the
  * request_queue of the @bio. An association failure is handled by walking up
  * the blkg tree. Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg. This situation only happens when a cgroup is dying and
+ * and q->root_blkg. This situation only happens when a cgroup is dying and
  * then the remaining bios will spill to the closest alive blkg.
  *
  * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1972,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -54,7 +54,7 @@ struct blkg_iostat_set {
 /* association between a blk cgroup and a request queue */
 struct blkcg_gq {
 	struct gendisk			*disk;
-	struct list_head		entry;
+	struct list_head		q_node;
 	struct hlist_node		blkcg_node;
 	struct blkcg			*blkcg;
 
@@ -250,7 +250,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	if (blkcg == &blkcg_root)
-		return disk->root_blkg;
+		return disk->queue->root_blkg;
 
 	blkg = rcu_dereference(blkcg->blkg_hint);
 	if (blkg && blkg->disk == disk)
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(blkg, pos_css,
-				     blkiolat->rqos.disk->root_blkg) {
+				     blkiolat->rqos.disk->queue->root_blkg) {
 		struct iolatency_grp *iolat;
 		struct child_latency_info *lat_info;
 		unsigned long flags;
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -451,8 +451,7 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
 	bool low_valid = false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
@@ -1181,7 +1180,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 
 	spin_lock_irq(&q->queue_lock);
 
-	if (!q->disk->root_blkg)
+	if (!q->root_blkg)
 		goto out_unlock;
 
 	if (throtl_can_upgrade(td, NULL))
@@ -1323,8 +1322,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 	 * blk-throttle.
 	 */
 	blkg_for_each_descendant_pre(blkg, pos_css,
-			global ? tg->td->queue->disk->root_blkg :
-				 tg_to_blkg(tg)) {
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
 		struct throtl_grp *parent_tg;
 
@@ -1719,7 +1717,7 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
 	 * path need RCU protection and to prevent warning from lockdep.
 	 */
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css, disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -1873,8 +1871,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
 		return false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg == this_tg)
@@ -1920,8 +1917,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
 	td->low_upgrade_time = jiffies;
 	td->scale = 0;
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -163,12 +163,6 @@ struct gendisk {
 	struct timer_rand_state *random;
 	atomic_t sync_io;		/* RAID */
 	struct disk_events *ev;
-#ifdef CONFIG_BLK_CGROUP
-	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
-	struct blkcg_gq		*root_blkg;
-	struct list_head	blkg_list;
-	struct mutex		blkcg_mutex;
-#endif /* CONFIG_BLK_CGROUP */
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	struct kobject integrity_kobj;
 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
@@ -487,6 +481,12 @@ struct request_queue {
 	struct blk_mq_tags	*sched_shared_tags;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+	struct mutex		blkcg_mutex;
+#endif
 
 	struct queue_limits	limits;
 