mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 09:12:07 +00:00
blk-wbt: throttle discards like background writes
Throttle discards like we would any background write. Discards should be background activity, so if they are impacting foreground IO, then we will throttle them down.

Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
8bea609019
commit
782f569774
@@ -10,11 +10,11 @@
|
||||
|
||||
/*
|
||||
* from upper:
|
||||
* 3 bits: reserved for other usage
|
||||
* 4 bits: reserved for other usage
|
||||
* 12 bits: size
|
||||
* 49 bits: time
|
||||
* 48 bits: time
|
||||
*/
|
||||
#define BLK_STAT_RES_BITS 3
|
||||
#define BLK_STAT_RES_BITS 4
|
||||
#define BLK_STAT_SIZE_BITS 12
|
||||
#define BLK_STAT_RES_SHIFT (64 - BLK_STAT_RES_BITS)
|
||||
#define BLK_STAT_SIZE_SHIFT (BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS)
|
||||
|
@@ -106,6 +106,8 @@ static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
|
||||
{
|
||||
if (wb_acct & WBT_KSWAPD)
|
||||
return &rwb->rq_wait[WBT_RWQ_KSWAPD];
|
||||
else if (wb_acct & WBT_DISCARD)
|
||||
return &rwb->rq_wait[WBT_RWQ_DISCARD];
|
||||
|
||||
return &rwb->rq_wait[WBT_RWQ_BG];
|
||||
}
|
||||
@@ -143,10 +145,13 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
|
||||
}
|
||||
|
||||
/*
|
||||
* If the device does write back caching, drop further down
|
||||
* before we wake people up.
|
||||
* For discards, our limit is always the background. For writes, if
|
||||
* the device does write back caching, drop further down before we
|
||||
* wake people up.
|
||||
*/
|
||||
if (rwb->wc && !wb_recent_wait(rwb))
|
||||
if (wb_acct & WBT_DISCARD)
|
||||
limit = rwb->wb_background;
|
||||
else if (rwb->wc && !wb_recent_wait(rwb))
|
||||
limit = 0;
|
||||
else
|
||||
limit = rwb->wb_normal;
|
||||
@@ -483,6 +488,9 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
|
||||
{
|
||||
unsigned int limit;
|
||||
|
||||
if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
|
||||
return rwb->wb_background;
|
||||
|
||||
/*
|
||||
* At this point we know it's a buffered write. If this is
|
||||
* kswapd trying to free memory, or REQ_SYNC is set, then
|
||||
@@ -564,21 +572,20 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
|
||||
|
||||
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
|
||||
{
|
||||
const int op = bio_op(bio);
|
||||
|
||||
/*
|
||||
* If not a WRITE, do nothing
|
||||
*/
|
||||
if (op != REQ_OP_WRITE)
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_WRITE:
|
||||
/*
|
||||
* Don't throttle WRITE_ODIRECT
|
||||
*/
|
||||
if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
|
||||
(REQ_SYNC | REQ_IDLE))
|
||||
return false;
|
||||
/* fallthrough */
|
||||
case REQ_OP_DISCARD:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Don't throttle WRITE_ODIRECT
|
||||
*/
|
||||
if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -605,6 +612,8 @@ enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
|
||||
|
||||
if (current_is_kswapd())
|
||||
ret |= WBT_KSWAPD;
|
||||
if (bio_op(bio) == REQ_OP_DISCARD)
|
||||
ret |= WBT_DISCARD;
|
||||
|
||||
__wbt_wait(rwb, ret, bio->bi_opf, lock);
|
||||
|
||||
|
@@ -14,13 +14,15 @@ enum wbt_flags {
|
||||
WBT_TRACKED = 1, /* write, tracked for throttling */
|
||||
WBT_READ = 2, /* read */
|
||||
WBT_KSWAPD = 4, /* write, from kswapd */
|
||||
WBT_DISCARD = 8, /* discard */
|
||||
|
||||
WBT_NR_BITS = 3, /* number of bits */
|
||||
WBT_NR_BITS = 4, /* number of bits */
|
||||
};
|
||||
|
||||
enum {
|
||||
WBT_RWQ_BG = 0,
|
||||
WBT_RWQ_KSWAPD,
|
||||
WBT_RWQ_DISCARD,
|
||||
WBT_NUM_RWQ,
|
||||
};
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user