Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
blk-throttle: remove CONFIG_BLK_DEV_THROTTLING_LOW
On the one hand, the feature has been marked EXPERIMENTAL since 2017 and appears to have had no users, testers, or developers since then; it is simply not active at all. On the other hand, even when the config is disabled, many fields in throtl_grp and throtl_data, and many functions, exist only for throtl low. Finally, blk-throtl is currently initialized during disk initialization and destroyed during disk removal, and it exposes many functions that are called directly from the block layer. Remove throtl low to make the code much cleaner and the follow-up work much easier.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20240509121107.3195568-2-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit bf20ab538c (parent 7be835694d)
Documentation/ABI/stable/sysfs-block

@@ -594,18 +594,6 @@ Description:
 		the data. If no such restriction exists, this file will contain
 		'0'. This file is writable for testing purposes.
-
-
-What:		/sys/block/<disk>/queue/throttle_sample_time
-Date:		March 2017
-Contact:	linux-block@vger.kernel.org
-Description:
-		[RW] This is the time window that blk-throttle samples data, in
-		millisecond. blk-throttle makes decision based on the
-		samplings. Lower time means cgroups have more smooth throughput,
-		but higher CPU overhead. This exists only when
-		CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
-
 
 What:		/sys/block/<disk>/queue/virt_boundary_mask
 Date:		April 2021
 Contact:	linux-block@vger.kernel.org
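The knob documented above was a plain queue sysfs attribute, so it was exercised like any other. A minimal userspace sketch, assuming a disk named sda and a kernel built with CONFIG_BLK_DEV_THROTTLING_LOW=y (the value written is only an example):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/throttle_sample_time";
	char buf[32];
	FILE *f = fopen(path, "r");

	/* the file only exists when the (now removed) config was enabled */
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("sample window (ms): %s", buf);
	fclose(f);

	/* a larger window lowers CPU overhead at the cost of coarser decisions */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "200");
	fclose(f);
	return 0;
}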
arch/loongarch/configs/loongson3_defconfig

@@ -76,7 +76,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_ZONED=y
 CONFIG_BLK_DEV_THROTTLING=y
-CONFIG_BLK_DEV_THROTTLING_LOW=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_FC_APPID=y
block/Kconfig

@@ -119,17 +119,6 @@ config BLK_DEV_THROTTLING
 
 	See Documentation/admin-guide/cgroup-v1/blkio-controller.rst for more information.
 
-config BLK_DEV_THROTTLING_LOW
-	bool "Block throttling .low limit interface support (EXPERIMENTAL)"
-	depends on BLK_DEV_THROTTLING
-	help
-	Add .low limit interface for block throttling. The low limit is a best
-	effort limit to prioritize cgroups. Depending on the setting, the limit
-	can be used to protect cgroups in terms of bandwidth/iops and better
-	utilize disk resource.
-
-	Note, this is an experimental interface and could be changed someday.
-
 config BLK_WBT
 	bool "Enable support for block device writeback throttling"
 	help
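The help text above describes the interface this option gated: per-device .low limits written to a cgroup-v2 io.low file. A hypothetical sketch of how such a limit was set from userspace; the cgroup path "/sys/fs/cgroup/demo", the device number 8:0, and all values are illustrative assumptions:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* rbps/latency/idle were keys of the old experimental io.low format */
	const char *cfg = "8:0 rbps=10485760 latency=10 idle=1000\n";
	int fd = open("/sys/fs/cgroup/demo/io.low", O_WRONLY);

	if (fd < 0)
		return 1;	/* cgroup absent, or kernel built without the option */
	if (write(fd, cfg, strlen(cfg)) != (ssize_t)strlen(cfg)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

With this commit the io.low file disappears from cgroup v2 entirely; blk-throttle keeps only the io.max limits.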
block/bio.c

@@ -1629,7 +1629,6 @@ void bio_endio(struct bio *bio)
 		goto again;
 	}
 
-	blk_throtl_bio_endio(bio);
 	/* release cgroup info */
 	bio_uninit(bio);
 	if (bio->bi_end_io)
block/blk-stat.c

@@ -57,9 +57,6 @@ void blk_stat_add(struct request *rq, u64 now)
 
 	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
 
-	if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
-		blk_throtl_stat_add(rq, value);
-
 	rcu_read_lock();
 	cpu = get_cpu();
 	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
block/blk-sysfs.c

@@ -516,10 +516,6 @@ QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
 QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
 QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
 
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
-#endif
-
 /* legacy alias for logical_block_size: */
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.attr = {.name = "hw_sector_size", .mode = 0444 },
@@ -640,9 +636,6 @@ static struct attribute *queue_attrs[] = {
 	&queue_fua_entry.attr,
 	&queue_dax_entry.attr,
 	&queue_poll_delay_entry.attr,
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	&blk_throtl_sample_time_entry.attr,
-#endif
 	&queue_virt_boundary_mask_entry.attr,
 	&queue_dma_alignment_entry.attr,
 	NULL,
block/blk-throttle.c: file diff suppressed because it is too large (this is the bulk of the removal).
block/blk-throttle.h

@@ -58,12 +58,6 @@ enum tg_state_flags {
 	THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */
 };
 
-enum {
-	LIMIT_LOW,
-	LIMIT_MAX,
-	LIMIT_CNT,
-};
-
 struct throtl_grp {
 	/* must be the first member */
 	struct blkg_policy_data pd;
@@ -102,14 +96,14 @@ struct throtl_grp {
 	bool has_rules_iops[2];
 
 	/* internally used bytes per second rate limits */
-	uint64_t bps[2][LIMIT_CNT];
+	uint64_t bps[2];
 	/* user configured bps limits */
-	uint64_t bps_conf[2][LIMIT_CNT];
+	uint64_t bps_conf[2];
 
 	/* internally used IOPS limits */
-	unsigned int iops[2][LIMIT_CNT];
+	unsigned int iops[2];
 	/* user configured IOPS limits */
-	unsigned int iops_conf[2][LIMIT_CNT];
+	unsigned int iops_conf[2];
 
 	/* Number of bytes dispatched in current slice */
 	uint64_t bytes_disp[2];
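The collapse of the limit arrays above is easier to see side by side. A compilable sketch (not the kernel code) using the LIMIT_* names from the enum removed earlier in this file:

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* before: one value per direction, per limit tier */
struct tg_before {
	uint64_t bps[2][LIMIT_CNT];
	unsigned int iops[2][LIMIT_CNT];
};

/* after: only the max limit survives, so one value per direction */
struct tg_after {
	uint64_t bps[2];
	unsigned int iops[2];
};

/*
 * Lookups lose an index along with the state machine that chose it:
 *   before:  tg->bps[rw][td->limit_index]  (tier selected at runtime)
 *   after:   tg->bps[rw]
 * td->limit_index here names the old tier selector in throtl_data,
 * which this commit removes as well.
 */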
@@ -132,22 +126,10 @@ struct throtl_grp {
 
 	unsigned long last_check_time;
 
-	unsigned long latency_target; /* us */
-	unsigned long latency_target_conf; /* us */
 	/* When did we start a new slice */
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
 
-	unsigned long last_finish_time; /* ns / 1024 */
-	unsigned long checked_last_finish_time; /* ns / 1024 */
-	unsigned long avg_idletime; /* ns / 1024 */
-	unsigned long idletime_threshold; /* us */
-	unsigned long idletime_threshold_conf; /* us */
-
-	unsigned int bio_cnt; /* total bios */
-	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
-	unsigned long bio_cnt_reset_time;
-
 	struct blkg_rwstat stat_bytes;
 	struct blkg_rwstat stat_ios;
 };
block/blk.h
@@ -388,17 +388,6 @@ static inline void ioc_clear_queue(struct request_queue *q)
 }
 #endif /* CONFIG_BLK_ICQ */
 
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
-extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
-	const char *page, size_t count);
-extern void blk_throtl_bio_endio(struct bio *bio);
-extern void blk_throtl_stat_add(struct request *rq, u64 time);
-#else
-static inline void blk_throtl_bio_endio(struct bio *bio) { }
-static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
-#endif
-
 struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
 
 static inline bool blk_queue_may_bounce(struct request_queue *q)
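The hunk above deletes an instance of the usual config-stub idiom: when the option is off, callers such as bio_endio() and blk_stat_add() compile against empty static inlines rather than carrying #ifdefs at every call site. A generic sketch of the pattern, with hypothetical names (CONFIG_FEATURE_X, feature_x_hook):

/* in a header */
#ifdef CONFIG_FEATURE_X
extern void feature_x_hook(int arg);		/* real definition elsewhere */
#else
static inline void feature_x_hook(int arg) { }	/* optimizes away to nothing */
#endif

/* at the call site: unconditional, no #ifdef needed */
void caller(void)
{
	feature_x_hook(42);
}

Once the feature is removed outright, as here, both branches go, and the unconditional call sites (the block/bio.c and block/blk-stat.c hunks above) are deleted with them.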