block: switch to atomic_t for request references
refcount_t is not as expensive as it used to be, but it's still more
expensive than the io_uring method of using atomic_t and just checking
for potential over/underflow. This borrows that same implementation,
which in turn is based on the mm implementation from Linus.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 0a467d0fdd
parent ceaa762527
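For illustration only (not part of the commit), a minimal userspace sketch of the scheme, using C11 atomics in place of the kernel's atomic_t: plain atomic ops do the counting, and the only guard is a cheap check that the counter is not zero or wrapping before a put, instead of refcount_t's saturating arithmetic on every operation.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct request's new atomic_t ref. */
struct req {
        atomic_uint ref;
};

/*
 * True if the count is 0, or has wrapped into 0xffffff81..0xffffffff
 * (i.e. -127..-1): a put on a dead object, or a counter about to
 * overflow. Mirrors req_ref_zero_or_close_to_overflow().
 */
static bool ref_zero_or_close_to_overflow(struct req *r)
{
        return atomic_load(&r->ref) + 127u <= 127u;
}

static bool ref_put_and_test(struct req *r)
{
        /* One cheap sanity check instead of saturating on every op. */
        assert(!ref_zero_or_close_to_overflow(r));
        return atomic_fetch_sub(&r->ref, 1) == 1;
}

int main(void)
{
        struct req r;

        atomic_init(&r.ref, 2);                 /* two owners */
        printf("%d\n", ref_put_and_test(&r));   /* 0: still in use */
        printf("%d\n", ref_put_and_test(&r));   /* 1: last put, free it */
        return 0;
}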
block/blk-flush.c

@@ -229,7 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (!refcount_dec_and_test(&flush_rq->ref)) {
+	if (!req_ref_put_and_test(flush_rq)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 		return;
@@ -349,7 +349,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * and READ flush_rq->end_io
 	 */
 	smp_wmb();
-	refcount_set(&flush_rq->ref, 1);
+	req_ref_set(flush_rq, 1);
 
 	blk_flush_queue_rq(flush_rq, false);
 }
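The smp_wmb() above orders the writes to the flush request's fields (notably ->end_io) before the reference is made live, so a concurrent reader that observes a nonzero count also observes fully initialized fields. A hypothetical userspace analogue of that publish pattern, using C11 release/acquire as a simpler stand-in for the smp_wmb() pairing:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical object standing in for the flush request. */
struct obj {
        void (*end_io)(void);   /* written before publication */
        atomic_uint ref;        /* 0 until the object is live */
};

static void done(void) { puts("end_io ran"); }

static void publish(struct obj *o)
{
        o->end_io = done;
        /* Release ordering plays the role of smp_wmb() + req_ref_set(). */
        atomic_store_explicit(&o->ref, 1, memory_order_release);
}

static void *reader(void *arg)
{
        struct obj *o = arg;

        /* Pairs with the release store: a nonzero ref implies the
         * earlier field writes are visible. */
        if (atomic_load_explicit(&o->ref, memory_order_acquire) != 0)
                o->end_io();
        return NULL;
}

int main(void)
{
        struct obj o = { .end_io = NULL, .ref = 0 };
        pthread_t t;

        pthread_create(&t, NULL, reader, &o);
        publish(&o);
        pthread_join(&t, NULL);
        return 0;
}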
block/blk-mq-tag.c

@@ -228,7 +228,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
 	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
-	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
+	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
 		rq = NULL;
 	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
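req_ref_inc_not_zero() only takes a reference while the count is still nonzero, so callers such as the tag iteration code can never resurrect a request whose last reference has already been dropped. A hypothetical userspace sketch of atomic_inc_not_zero()'s semantics as a compare-and-swap loop:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Hypothetical sketch of atomic_inc_not_zero(): take a reference only
 * while the count is nonzero, so a freed object is never revived.
 */
static bool inc_not_zero(atomic_uint *ref)
{
        unsigned int old = atomic_load(ref);

        do {
                if (old == 0)
                        return false;   /* last ref already dropped */
        } while (!atomic_compare_exchange_weak(ref, &old, old + 1));

        return true;                    /* reference taken */
}

int main(void)
{
        atomic_uint live, dead;

        atomic_init(&live, 1);
        atomic_init(&dead, 0);
        assert(inc_not_zero(&live));    /* 1 -> 2: success */
        assert(!inc_not_zero(&dead));   /* stays 0: refused */
        return 0;
}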
block/blk-mq.c

@@ -394,7 +394,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* tag was already set */
 	WRITE_ONCE(rq->deadline, 0);
-	refcount_set(&rq->ref, 1);
+	req_ref_set(rq, 1);
 
 	if (rq->rq_flags & RQF_ELV) {
 		struct elevator_queue *e = data->q->elevator;
@@ -642,7 +642,7 @@ void blk_mq_free_request(struct request *rq)
 	rq_qos_done(q, rq);
 
 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-	if (refcount_dec_and_test(&rq->ref))
+	if (req_ref_put_and_test(rq))
 		__blk_mq_free_request(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -938,7 +938,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		rq_qos_done(rq->q, rq);
 
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-		if (!refcount_dec_and_test(&rq->ref))
+		if (!req_ref_put_and_test(rq))
 			continue;
 
 		blk_crypto_free_request(rq);
@@ -1401,7 +1401,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 {
 	if (is_flush_rq(rq))
 		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
+	else if (req_ref_put_and_test(rq))
 		__blk_mq_free_request(rq);
 }
 
@@ -3049,7 +3049,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 		unsigned long rq_addr = (unsigned long)rq;
 
 		if (rq_addr >= start && rq_addr < end) {
-			WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+			WARN_ON_ONCE(req_ref_read(rq) != 0);
 			cmpxchg(&drv_tags->rqs[i], rq, NULL);
 		}
 	}
@@ -3383,7 +3383,7 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
 	if (!tags)
 		return;
 
-	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
 
 	for (i = 0; i < queue_depth; i++)
 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
block/blk.h

@@ -461,4 +461,35 @@ static inline bool should_fail_request(struct block_device *part,
 }
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Optimized request reference counting. Ideally we'd make timeouts be more
+ * clever, as that's the only reason we need references at all... But until
+ * this happens, this is faster than using refcount_t. Also see:
+ *
+ * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
+ */
+#define req_ref_zero_or_close_to_overflow(req)	\
+	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct request *req)
+{
+	return atomic_inc_not_zero(&req->ref);
+}
+
+static inline bool req_ref_put_and_test(struct request *req)
+{
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	return atomic_dec_and_test(&req->ref);
+}
+
+static inline void req_ref_set(struct request *req, int value)
+{
+	atomic_set(&req->ref, value);
+}
+
+static inline int req_ref_read(struct request *req)
+{
+	return atomic_read(&req->ref);
+}
+
 #endif /* BLK_INTERNAL_H */
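A note on the macro's arithmetic: casting the counter to unsigned int and adding 127 maps exactly the values 0 and -127..-1 (0xffffff81..0xffffffff) into the range 0..127, so a single comparison flags both a put on a dead request and a counter that has underflowed or is close to wrapping. A standalone demo of the predicate (illustrative, not from the commit):

#include <stdio.h>

/* The predicate from req_ref_zero_or_close_to_overflow(), on raw values. */
static int zero_or_close_to_overflow(unsigned int v)
{
        return v + 127u <= 127u;
}

int main(void)
{
        unsigned int samples[] = {
                0,                      /* WARN: already dead */
                1, 2, 0x7fffffffu,      /* ok: normal counts */
                (unsigned int)-127,     /* WARN: deep underflow / near wrap */
                (unsigned int)-1,       /* WARN: underflowed once */
        };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("ref=0x%08x -> %s\n", samples[i],
                       zero_or_close_to_overflow(samples[i]) ? "WARN" : "ok");
        return 0;
}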
include/linux/blk-mq.h

@@ -139,7 +139,7 @@ struct request {
 	unsigned short ioprio;
 
 	enum mq_rq_state state;
-	refcount_t ref;
+	atomic_t ref;
 
 	unsigned long deadline;
 