block: kill blk_start_queueing()
blk_start_queueing() is identical to __blk_run_queue() except that it
doesn't check for recursion.  None of the current users depends on
blk_start_queueing() running request_fn directly.  Replace usages of
blk_start_queueing() with [__]blk_run_queue() and kill it.

[ Impact: removal of mostly duplicate interface function ]

Signed-off-by: Tejun Heo <tj@kernel.org>
commit a7f5579234
parent a538cd03be
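To make the conversion concrete, here is a minimal before/after sketch of a caller, modeled on the as_work_handler() hunk below. The names my_kick_queue_*, struct my_data, and kick_work are made up for illustration and are not part of this commit. A caller that runs without q->queue_lock held, such as a work handler, switches to blk_run_queue(), which takes the lock itself; callers that already hold the lock switch to __blk_run_queue().

/*
 * Hypothetical before/after sketch -- not part of this commit.  The
 * struct and function names are invented, mirroring the as_work_handler()
 * hunk further down.
 */
static void my_kick_queue_before(struct work_struct *work)
{
	struct my_data *md = container_of(work, struct my_data, kick_work);
	struct request_queue *q = md->queue;
	unsigned long flags;

	/* The caller had to take queue_lock before using the old helper. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void my_kick_queue_after(struct work_struct *work)
{
	struct my_data *md = container_of(work, struct my_data, kick_work);

	/* blk_run_queue() acquires queue_lock itself; no local locking needed. */
	blk_run_queue(md->queue);
}

Call sites that already hold queue_lock, such as blk_insert_request(), elv_insert(), elv_quiesce_start(), elv_completed_request(), and the cfq paths, use __blk_run_queue() instead, as the hunks below show.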
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1312,12 +1312,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 static void as_work_handler(struct work_struct *work)
 {
 	struct as_data *ad = container_of(work, struct as_data, antic_work);
-	struct request_queue *q = ad->q;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_start_queueing(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -433,9 +433,7 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -894,28 +892,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
-/**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q: request queue to kick into gear
- *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
- *
- * The queue lock must be held with interrupts disabled.
- */
-void blk_start_queueing(struct request_queue *q)
-{
-	if (!blk_queue_plugged(q)) {
-		if (unlikely(blk_queue_stopped(q)))
-			return;
-		q->request_fn(q);
-	} else
-		__generic_unplug_device(q);
-}
-EXPORT_SYMBOL(blk_start_queueing);
-
 /**
  * blk_requeue_request - put a request back on queue
  * @q:	request queue where request should be inserted
@@ -984,7 +960,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	blk_start_queueing(q);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
 			    cfqd->busy_queues > 1) {
 				del_timer(&cfqd->idle_slice_timer);
-				blk_start_queueing(cfqd->queue);
+				__blk_run_queue(cfqd->queue);
 			}
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		blk_start_queueing(cfqd->queue);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	blk_start_queueing(q);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
 
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -599,7 +599,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -643,8 +643,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		blk_remove_plug(q);
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -971,7 +970,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			blk_start_queueing(q);
+			__blk_run_queue(q);
 		}
 	}
 }
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -797,7 +797,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);