block: add code to track actual device queue depth
For blk-mq, ->nr_requests does track queue depth, at least at init time. But for the older queue paths, it's simply a soft setting. On top of that, it's generally larger than the hardware setting on purpose, to allow backup of requests for merging.

Fill a hole in struct request_queue with a 'queue_depth' member, and add a blk_set_queue_depth() helper that drivers can call to more closely inform the block layer of the real device queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
commit d278d4a889
parent 600271d900
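
As an illustration of the new interface (not part of this commit), a driver that has negotiated its real hardware queue depth could pass it on when it brings up its request queue. Only blk_set_queue_depth() comes from this patch; the mydev names below are hypothetical:

#include <linux/blkdev.h>

/* Hypothetical driver state; names made up for the example. */
struct mydev {
        struct request_queue *queue;    /* set up elsewhere */
        unsigned int hw_tags;           /* depth negotiated with the hardware */
};

/*
 * Report the real hardware depth so the block layer no longer has to
 * guess from the intentionally larger ->nr_requests soft setting.
 */
static void mydev_report_depth(struct mydev *dev)
{
        blk_set_queue_depth(dev->queue, dev->hw_tags);
}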
diff --git a/block/blk-settings.c b/block/blk-settings.c
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -836,6 +836,18 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_set_queue_depth - tell the block layer about the device queue depth
+ * @q:		the request queue for the device
+ * @depth:	queue depth
+ *
+ */
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
+{
+	q->queue_depth = depth;
+}
+EXPORT_SYMBOL(blk_set_queue_depth);
+
 /**
  * blk_queue_write_cache - configure queue's write cache
  * @q:		the request queue for the device
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
 		wmb();
 	}
 
+	if (sdev->request_queue)
+		blk_set_queue_depth(sdev->request_queue, depth);
+
 	return sdev->queue_depth;
 }
 EXPORT_SYMBOL(scsi_change_queue_depth);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -405,6 +405,8 @@ struct request_queue {
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
+	unsigned int		queue_depth;
+
 	/* hw dispatch queues */
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
@@ -777,6 +779,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 	return false;
 }
 
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+	if (q->queue_depth)
+		return q->queue_depth;
+
+	return q->nr_requests;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -1094,6 +1104,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
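
On the consuming side, blk_queue_depth() (added above) returns the driver-reported q->queue_depth when one has been set, and falls back to the ->nr_requests soft setting otherwise. A sketch of a hypothetical caller, again with made-up names:

#include <linux/blkdev.h>

/*
 * Hypothetical check: treat the device as saturated once the number of
 * requests in flight reaches the effective depth reported by
 * blk_queue_depth().
 */
static bool mydev_queue_saturated(struct request_queue *q,
                                  unsigned int inflight)
{
        return inflight >= blk_queue_depth(q);
}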