block: remove dead elevator code
This removes a bunch of core and elevator related code.

On the core front, we remove anything related to queue running, draining, initialization, plugging, and congestion. We also kill anything related to request allocation, merging, retrieval, and completion.

Remove any checking for single queue IO schedulers, as they no longer exist. This means we can also delete a bunch of code related to request issue, adding, completion, etc - and all the SQ related ops and helpers.

Also kill load_default_modules(), as all that did was provide a way to load the default single queue elevator.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: f382fb0bce
Commit: a1ce35fa49
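The theme of the diff below is mechanical: every elevator hook used to check uses_mq and then pick between the legacy single-queue (ops.sq) table and the blk-mq (ops.mq) table of struct elevator_type; with the legacy path gone, only the mq table and a direct call remain. The following is a minimal stand-alone sketch of that before/after shape, not kernel code: only the uses_mq flag and the sq/mq split mirror the real struct elevator_type, every other name here is invented for illustration.

/*
 * Stand-alone sketch of the pattern this commit deletes -- not kernel code.
 * Before: the elevator type carried a uses_mq flag plus a union of legacy
 * ("sq") and blk-mq ("mq") ops, and every caller branched on the flag.
 * After: only the mq ops are left, so callers call them directly.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue;                           /* opaque stand-in for struct request_queue */

struct sq_ops { void (*add_req)(struct queue *q); };
struct mq_ops { void (*insert_req)(struct queue *q); };

struct elevator_type_old {
        bool uses_mq;
        union {
                struct sq_ops sq;
                struct mq_ops mq;
        } ops;
};

struct elevator_type_new {
        struct mq_ops ops;              /* uses_mq and the sq union are gone */
};

static void mq_insert(struct queue *q)
{
        (void)q;
        puts("insert via blk-mq scheduler hook");
}

/* Before: every call site needed the uses_mq check and a dead sq branch. */
static void dispatch_old(struct elevator_type_old *e, struct queue *q)
{
        if (e->uses_mq && e->ops.mq.insert_req)
                e->ops.mq.insert_req(q);
        else if (!e->uses_mq && e->ops.sq.add_req)
                e->ops.sq.add_req(q);
}

/* After: one possible path, one test. */
static void dispatch_new(struct elevator_type_new *e, struct queue *q)
{
        if (e->ops.insert_req)
                e->ops.insert_req(q);
}

int main(void)
{
        struct elevator_type_old before = { .uses_mq = true, .ops.mq.insert_req = mq_insert };
        struct elevator_type_new after  = { .ops.insert_req = mq_insert };

        dispatch_old(&before, NULL);
        dispatch_new(&after, NULL);
        return 0;
}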
block/bfq-iosched.c
@@ -5745,7 +5745,6 @@ static struct elevator_type iosched_bfq_mq = {
                 .exit_sched = bfq_exit_queue,
         },
 
-        .uses_mq = true,
         .icq_size = sizeof(struct bfq_io_cq),
         .icq_align = __alignof__(struct bfq_io_cq),
         .elevator_attrs = bfq_attrs,
block/blk-core.c: 1709 lines changed (diff too large to show here)
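The block/blk-core.c changes are not shown above, but per the commit message that is where the legacy request_fn submission path (queue running, draining, plugging, request allocation and completion) goes away. As an illustration only, not taken from the suppressed diff, this is roughly the driver-facing loop that the deleted helpers (blk_fetch_request(), __blk_end_request_all(), __blk_run_queue()) used to serve:

/*
 * Illustrative sketch of a pre-blk-mq ->request_fn driver loop, built on
 * helpers this series removes.  It is not part of the suppressed
 * blk-core.c diff; it only shows the interface that disappears.
 */
static void legacy_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* called with q->queue_lock held; pull requests queued by the elevator */
        while ((rq = blk_fetch_request(q)) != NULL) {
                /* hand rq to the hardware ... */
                __blk_end_request_all(rq, BLK_STS_OK);  /* ... then complete it */
        }
}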
block/blk-exec.c
@@ -48,8 +48,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                            struct request *rq, int at_head,
                            rq_end_io_fn *done)
 {
-        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-
         WARN_ON(irqs_disabled());
         WARN_ON(!blk_rq_is_passthrough(rq));
 
@@ -60,23 +58,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
          * don't check dying flag for MQ because the request won't
          * be reused after dying flag is set
          */
-        if (q->mq_ops) {
-                blk_mq_sched_insert_request(rq, at_head, true, false);
-                return;
-        }
-
-        spin_lock_irq(q->queue_lock);
-
-        if (unlikely(blk_queue_dying(q))) {
-                rq->rq_flags |= RQF_QUIET;
-                __blk_end_request_all(rq, BLK_STS_IOERR);
-                spin_unlock_irq(q->queue_lock);
-                return;
-        }
-
-        __elv_add_request(q, rq, where);
-        __blk_run_queue(q);
-        spin_unlock_irq(q->queue_lock);
+        blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
block/blk-ioc.c
@@ -48,10 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
         if (icq->flags & ICQ_EXITED)
                 return;
 
-        if (et->uses_mq && et->ops.mq.exit_icq)
+        if (et->ops.mq.exit_icq)
                 et->ops.mq.exit_icq(icq);
-        else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
-                et->ops.sq.elevator_exit_icq_fn(icq);
 
         icq->flags |= ICQ_EXITED;
 }
@@ -187,25 +185,13 @@ void put_io_context_active(struct io_context *ioc)
          * reverse double locking. Read comment in ioc_release_fn() for
          * explanation on the nested locking annotation.
          */
-retry:
         spin_lock_irqsave_nested(&ioc->lock, flags, 1);
         hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                 if (icq->flags & ICQ_EXITED)
                         continue;
 
-                et = icq->q->elevator->type;
-                if (et->uses_mq) {
-                        ioc_exit_icq(icq);
-                } else {
-                        if (spin_trylock(icq->q->queue_lock)) {
-                                ioc_exit_icq(icq);
-                                spin_unlock(icq->q->queue_lock);
-                        } else {
-                                spin_unlock_irqrestore(&ioc->lock, flags);
-                                cpu_relax();
-                                goto retry;
-                        }
-                }
+                ioc_exit_icq(icq);
         }
         spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -232,7 +218,7 @@ static void __ioc_clear_queue(struct list_head *icq_list)
 
         while (!list_empty(icq_list)) {
                 struct io_cq *icq = list_entry(icq_list->next,
-                                                struct io_cq, q_node);
+                                               struct io_cq, q_node);
                 struct io_context *ioc = icq->ioc;
 
                 spin_lock_irqsave(&ioc->lock, flags);
@@ -253,14 +239,9 @@ void ioc_clear_queue(struct request_queue *q)
 
         spin_lock_irq(q->queue_lock);
         list_splice_init(&q->icq_list, &icq_list);
+        spin_unlock_irq(q->queue_lock);
 
-        if (q->mq_ops) {
-                spin_unlock_irq(q->queue_lock);
-                __ioc_clear_queue(&icq_list);
-        } else {
-                __ioc_clear_queue(&icq_list);
-                spin_unlock_irq(q->queue_lock);
-        }
+        __ioc_clear_queue(&icq_list);
 }
 
 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
@@ -415,10 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
         if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                 list_add(&icq->q_node, &q->icq_list);
-                if (et->uses_mq && et->ops.mq.init_icq)
+                if (et->ops.mq.init_icq)
                         et->ops.mq.init_icq(icq);
-                else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
-                        et->ops.sq.elevator_init_icq_fn(icq);
         } else {
                 kmem_cache_free(et->icq_cache, icq);
                 icq = ioc_lookup_icq(ioc, q);
block/blk-merge.c
@@ -862,13 +862,8 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                           struct request *next)
 {
-        struct elevator_queue *e = q->elevator;
         struct request *free;
 
-        if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
-                if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
-                        return 0;
-
         free = attempt_merge(q, rq, next);
         if (free) {
                 __blk_put_request(q, free);
block/blk-settings.c
@@ -20,40 +20,6 @@ EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
 
-/**
- * blk_queue_prep_rq - set a prepare_request function for queue
- * @q: queue
- * @pfn: prepare_request function
- *
- * It's possible for a queue to register a prepare_request callback which
- * is invoked before the request is handed to the request_fn. The goal of
- * the function is to prepare a request for I/O, it can be used to build a
- * cdb from the request data for instance.
- *
- */
-void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
-{
-        q->prep_rq_fn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_prep_rq);
-
-/**
- * blk_queue_unprep_rq - set an unprepare_request function for queue
- * @q: queue
- * @ufn: unprepare_request function
- *
- * It's possible for a queue to register an unprepare_request callback
- * which is invoked before the request is finally completed. The goal
- * of the function is to deallocate any data that was allocated in the
- * prepare_request callback.
- *
- */
-void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
-{
-        q->unprep_rq_fn = ufn;
-}
-EXPORT_SYMBOL(blk_queue_unprep_rq);
-
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
         q->softirq_done_fn = fn;
@@ -163,8 +129,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 
         q->make_request_fn = mfn;
         blk_queue_dma_alignment(q, 511);
-        blk_queue_congestion_threshold(q);
-        q->nr_batching = BLK_BATCH_REQ;
 
         blk_set_default_limits(&q->limits);
 }
block/blk-sysfs.c
@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
         unsigned long nr;
         int ret, err;
 
-        if (!q->request_fn && !q->mq_ops)
+        if (!q->mq_ops)
                 return -EINVAL;
 
         ret = queue_var_store(&nr, page, count);
@@ -78,11 +78,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
         if (nr < BLKDEV_MIN_RQ)
                 nr = BLKDEV_MIN_RQ;
 
-        if (q->request_fn)
-                err = blk_update_nr_requests(q, nr);
-        else
-                err = blk_mq_update_nr_requests(q, nr);
-
+        err = blk_mq_update_nr_requests(q, nr);
         if (err)
                 return err;
 
@@ -463,20 +459,14 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
          * ends up either enabling or disabling wbt completely. We can't
          * have IO inflight if that happens.
          */
-        if (q->mq_ops) {
-                blk_mq_freeze_queue(q);
-                blk_mq_quiesce_queue(q);
-        } else
-                blk_queue_bypass_start(q);
+        blk_mq_freeze_queue(q);
+        blk_mq_quiesce_queue(q);
 
         wbt_set_min_lat(q, val);
         wbt_update_limits(q);
 
-        if (q->mq_ops) {
-                blk_mq_unquiesce_queue(q);
-                blk_mq_unfreeze_queue(q);
-        } else
-                blk_queue_bypass_end(q);
+        blk_mq_unquiesce_queue(q);
+        blk_mq_unfreeze_queue(q);
 
         return count;
 }
@@ -847,17 +837,10 @@ static void __blk_release_queue(struct work_struct *work)
 
         blk_free_queue_stats(q->stats);
 
-        blk_exit_rl(q, &q->root_rl);
-
         blk_queue_free_zone_bitmaps(q);
 
-        if (!q->mq_ops) {
-                if (q->exit_rq_fn)
-                        q->exit_rq_fn(q, q->fq->flush_rq);
-                blk_free_flush_queue(q->fq);
-        } else {
+        if (q->mq_ops)
                 blk_mq_release(q);
-        }
 
         blk_trace_shutdown(q);
 
@@ -920,7 +903,6 @@ int blk_register_queue(struct gendisk *disk)
         if (!blk_queue_init_done(q)) {
                 queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                 percpu_ref_switch_to_percpu(&q->q_usage_counter);
-                blk_queue_bypass_end(q);
         }
 
         ret = blk_trace_init_sysfs(dev);
@@ -947,7 +929,7 @@ int blk_register_queue(struct gendisk *disk)
 
         blk_throtl_register_queue(q);
 
-        if (q->request_fn || (q->mq_ops && q->elevator)) {
+        if ((q->mq_ops && q->elevator)) {
                 ret = elv_register_queue(q);
                 if (ret) {
                         mutex_unlock(&q->sysfs_lock);
@@ -1005,7 +987,7 @@ void blk_unregister_queue(struct gendisk *disk)
         blk_trace_remove_sysfs(disk_to_dev(disk));
 
         mutex_lock(&q->sysfs_lock);
-        if (q->request_fn || (q->mq_ops && q->elevator))
+        if (q->mq_ops && q->elevator)
                 elv_unregister_queue(q);
         mutex_unlock(&q->sysfs_lock);
 
block/blk.h (51 lines changed)
@@ -7,12 +7,6 @@
 #include <xen/xen.h>
 #include "blk-mq.h"
 
-/* Amount of time in which a process may batch requests */
-#define BLK_BATCH_TIME  (HZ/50UL)
-
-/* Number of requests a "batching" process may submit */
-#define BLK_BATCH_REQ   32
-
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT (5 * HZ)
 
@@ -132,9 +126,6 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_exit_queue(struct request_queue *q);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                         struct bio *bio);
-void blk_queue_bypass_start(struct request_queue *q);
-void blk_queue_bypass_end(struct request_queue *q);
-void __blk_queue_free_tags(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
@@ -281,23 +272,6 @@ static inline bool blk_rq_is_complete(struct request *rq)
 
 void blk_insert_flush(struct request *rq);
 
-static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (e->type->ops.sq.elevator_activate_req_fn)
-                e->type->ops.sq.elevator_activate_req_fn(q, rq);
-}
-
-static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (e->type->ops.sq.elevator_deactivate_req_fn)
-                e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
-}
-
-int elevator_init(struct request_queue *);
 int elevator_init_mq(struct request_queue *q);
 int elevator_switch_mq(struct request_queue *q,
                               struct elevator_type *new_e);
@@ -332,31 +306,8 @@ void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
-void blk_queue_congestion_threshold(struct request_queue *q);
-
 int blk_dev_init(void);
 
-
-/*
- * Return the threshold (number of used requests) at which the queue is
- * considered to be congested. It include a little hysteresis to keep the
- * context switch rate down.
- */
-static inline int queue_congestion_on_threshold(struct request_queue *q)
-{
-        return q->nr_congestion_on;
-}
-
-/*
- * The threshold at which a queue is considered to be uncongested
- */
-static inline int queue_congestion_off_threshold(struct request_queue *q)
-{
-        return q->nr_congestion_off;
-}
-
-extern int blk_update_nr_requests(struct request_queue *, unsigned int);
-
 /*
  * Contribute to IO statistics IFF:
  *
@@ -478,8 +429,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */
 
-extern void blk_drain_queue(struct request_queue *q);
-
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
 #else
block/elevator.c (377 lines changed)
@ -61,10 +61,8 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
|
||||
struct request_queue *q = rq->q;
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.allow_merge)
|
||||
if (e->type->ops.mq.allow_merge)
|
||||
return e->type->ops.mq.allow_merge(q, rq, bio);
|
||||
else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
|
||||
return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
|
||||
|
||||
return 1;
|
||||
}
|
||||
@ -95,14 +93,14 @@ static bool elevator_match(const struct elevator_type *e, const char *name)
|
||||
}
|
||||
|
||||
/*
|
||||
* Return scheduler with name 'name' and with matching 'mq capability
|
||||
* Return scheduler with name 'name'
|
||||
*/
|
||||
static struct elevator_type *elevator_find(const char *name, bool mq)
|
||||
static struct elevator_type *elevator_find(const char *name)
|
||||
{
|
||||
struct elevator_type *e;
|
||||
|
||||
list_for_each_entry(e, &elv_list, list) {
|
||||
if (elevator_match(e, name) && (mq == e->uses_mq))
|
||||
if (elevator_match(e, name))
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -121,12 +119,12 @@ static struct elevator_type *elevator_get(struct request_queue *q,
|
||||
|
||||
spin_lock(&elv_list_lock);
|
||||
|
||||
e = elevator_find(name, q->mq_ops != NULL);
|
||||
e = elevator_find(name);
|
||||
if (!e && try_loading) {
|
||||
spin_unlock(&elv_list_lock);
|
||||
request_module("%s-iosched", name);
|
||||
spin_lock(&elv_list_lock);
|
||||
e = elevator_find(name, q->mq_ops != NULL);
|
||||
e = elevator_find(name);
|
||||
}
|
||||
|
||||
if (e && !try_module_get(e->elevator_owner))
|
||||
@ -150,26 +148,6 @@ static int __init elevator_setup(char *str)
|
||||
|
||||
__setup("elevator=", elevator_setup);
|
||||
|
||||
/* called during boot to load the elevator chosen by the elevator param */
|
||||
void __init load_default_elevator_module(void)
|
||||
{
|
||||
struct elevator_type *e;
|
||||
|
||||
if (!chosen_elevator[0])
|
||||
return;
|
||||
|
||||
/*
|
||||
* Boot parameter is deprecated, we haven't supported that for MQ.
|
||||
* Only look for non-mq schedulers from here.
|
||||
*/
|
||||
spin_lock(&elv_list_lock);
|
||||
e = elevator_find(chosen_elevator, false);
|
||||
spin_unlock(&elv_list_lock);
|
||||
|
||||
if (!e)
|
||||
request_module("%s-iosched", chosen_elevator);
|
||||
}
|
||||
|
||||
static struct kobj_type elv_ktype;
|
||||
|
||||
struct elevator_queue *elevator_alloc(struct request_queue *q,
|
||||
@ -185,7 +163,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
|
||||
kobject_init(&eq->kobj, &elv_ktype);
|
||||
mutex_init(&eq->sysfs_lock);
|
||||
hash_init(eq->hash);
|
||||
eq->uses_mq = e->uses_mq;
|
||||
|
||||
return eq;
|
||||
}
|
||||
@ -200,52 +177,11 @@ static void elevator_release(struct kobject *kobj)
|
||||
kfree(e);
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the default elevator specified by config boot param for non-mq devices,
|
||||
* or by config option. Don't try to load modules as we could be running off
|
||||
* async and request_module() isn't allowed from async.
|
||||
*/
|
||||
int elevator_init(struct request_queue *q)
|
||||
{
|
||||
struct elevator_type *e = NULL;
|
||||
int err = 0;
|
||||
|
||||
/*
|
||||
* q->sysfs_lock must be held to provide mutual exclusion between
|
||||
* elevator_switch() and here.
|
||||
*/
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
if (unlikely(q->elevator))
|
||||
goto out_unlock;
|
||||
|
||||
if (*chosen_elevator) {
|
||||
e = elevator_get(q, chosen_elevator, false);
|
||||
if (!e)
|
||||
printk(KERN_ERR "I/O scheduler %s not found\n",
|
||||
chosen_elevator);
|
||||
}
|
||||
|
||||
if (!e) {
|
||||
printk(KERN_ERR
|
||||
"Default I/O scheduler not found. Using noop.\n");
|
||||
e = elevator_get(q, "noop", false);
|
||||
}
|
||||
|
||||
err = e->ops.sq.elevator_init_fn(q, e);
|
||||
if (err)
|
||||
elevator_put(e);
|
||||
out_unlock:
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
|
||||
{
|
||||
mutex_lock(&e->sysfs_lock);
|
||||
if (e->uses_mq && e->type->ops.mq.exit_sched)
|
||||
if (e->type->ops.mq.exit_sched)
|
||||
blk_mq_exit_sched(q, e);
|
||||
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
|
||||
e->type->ops.sq.elevator_exit_fn(e);
|
||||
mutex_unlock(&e->sysfs_lock);
|
||||
|
||||
kobject_put(&e->kobj);
|
||||
@ -393,10 +329,8 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
|
||||
return ELEVATOR_BACK_MERGE;
|
||||
}
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.request_merge)
|
||||
if (e->type->ops.mq.request_merge)
|
||||
return e->type->ops.mq.request_merge(q, req, bio);
|
||||
else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
|
||||
return e->type->ops.sq.elevator_merge_fn(q, req, bio);
|
||||
|
||||
return ELEVATOR_NO_MERGE;
|
||||
}
|
||||
@ -447,10 +381,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq,
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.request_merged)
|
||||
if (e->type->ops.mq.request_merged)
|
||||
e->type->ops.mq.request_merged(q, rq, type);
|
||||
else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
|
||||
e->type->ops.sq.elevator_merged_fn(q, rq, type);
|
||||
|
||||
if (type == ELEVATOR_BACK_MERGE)
|
||||
elv_rqhash_reposition(q, rq);
|
||||
@ -464,13 +396,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
|
||||
struct elevator_queue *e = q->elevator;
|
||||
bool next_sorted = false;
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.requests_merged)
|
||||
if (e->type->ops.mq.requests_merged)
|
||||
e->type->ops.mq.requests_merged(q, rq, next);
|
||||
else if (e->type->ops.sq.elevator_merge_req_fn) {
|
||||
next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
|
||||
if (next_sorted)
|
||||
e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
|
||||
}
|
||||
|
||||
elv_rqhash_reposition(q, rq);
|
||||
|
||||
@ -482,156 +409,12 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
|
||||
q->last_merge = rq;
|
||||
}
|
||||
|
||||
void elv_bio_merged(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return;
|
||||
|
||||
if (e->type->ops.sq.elevator_bio_merged_fn)
|
||||
e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
|
||||
}
|
||||
|
||||
void elv_requeue_request(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
/*
|
||||
* it already went through dequeue, we need to decrement the
|
||||
* in_flight count again
|
||||
*/
|
||||
if (blk_account_rq(rq)) {
|
||||
q->in_flight[rq_is_sync(rq)]--;
|
||||
if (rq->rq_flags & RQF_SORTED)
|
||||
elv_deactivate_rq(q, rq);
|
||||
}
|
||||
|
||||
rq->rq_flags &= ~RQF_STARTED;
|
||||
|
||||
blk_pm_requeue_request(rq);
|
||||
|
||||
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
|
||||
}
|
||||
|
||||
void elv_drain_elevator(struct request_queue *q)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
static int printed;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return;
|
||||
|
||||
lockdep_assert_held(q->queue_lock);
|
||||
|
||||
while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
|
||||
;
|
||||
if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
|
||||
printk(KERN_ERR "%s: forced dispatching is broken "
|
||||
"(nr_sorted=%u), please report this\n",
|
||||
q->elevator->type->elevator_name, q->nr_sorted);
|
||||
}
|
||||
}
|
||||
|
||||
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
|
||||
{
|
||||
trace_block_rq_insert(q, rq);
|
||||
|
||||
blk_pm_add_request(q, rq);
|
||||
|
||||
rq->q = q;
|
||||
|
||||
if (rq->rq_flags & RQF_SOFTBARRIER) {
|
||||
/* barriers are scheduling boundary, update end_sector */
|
||||
if (!blk_rq_is_passthrough(rq)) {
|
||||
q->end_sector = rq_end_sector(rq);
|
||||
q->boundary_rq = rq;
|
||||
}
|
||||
} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
|
||||
(where == ELEVATOR_INSERT_SORT ||
|
||||
where == ELEVATOR_INSERT_SORT_MERGE))
|
||||
where = ELEVATOR_INSERT_BACK;
|
||||
|
||||
switch (where) {
|
||||
case ELEVATOR_INSERT_REQUEUE:
|
||||
case ELEVATOR_INSERT_FRONT:
|
||||
rq->rq_flags |= RQF_SOFTBARRIER;
|
||||
list_add(&rq->queuelist, &q->queue_head);
|
||||
break;
|
||||
|
||||
case ELEVATOR_INSERT_BACK:
|
||||
rq->rq_flags |= RQF_SOFTBARRIER;
|
||||
elv_drain_elevator(q);
|
||||
list_add_tail(&rq->queuelist, &q->queue_head);
|
||||
/*
|
||||
* We kick the queue here for the following reasons.
|
||||
* - The elevator might have returned NULL previously
|
||||
* to delay requests and returned them now. As the
|
||||
* queue wasn't empty before this request, ll_rw_blk
|
||||
* won't run the queue on return, resulting in hang.
|
||||
* - Usually, back inserted requests won't be merged
|
||||
* with anything. There's no point in delaying queue
|
||||
* processing.
|
||||
*/
|
||||
__blk_run_queue(q);
|
||||
break;
|
||||
|
||||
case ELEVATOR_INSERT_SORT_MERGE:
|
||||
/*
|
||||
* If we succeed in merging this request with one in the
|
||||
* queue already, we are done - rq has now been freed,
|
||||
* so no need to do anything further.
|
||||
*/
|
||||
if (elv_attempt_insert_merge(q, rq))
|
||||
break;
|
||||
/* fall through */
|
||||
case ELEVATOR_INSERT_SORT:
|
||||
BUG_ON(blk_rq_is_passthrough(rq));
|
||||
rq->rq_flags |= RQF_SORTED;
|
||||
q->nr_sorted++;
|
||||
if (rq_mergeable(rq)) {
|
||||
elv_rqhash_add(q, rq);
|
||||
if (!q->last_merge)
|
||||
q->last_merge = rq;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some ioscheds (cfq) run q->request_fn directly, so
|
||||
* rq cannot be accessed after calling
|
||||
* elevator_add_req_fn.
|
||||
*/
|
||||
q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
|
||||
break;
|
||||
|
||||
case ELEVATOR_INSERT_FLUSH:
|
||||
rq->rq_flags |= RQF_SOFTBARRIER;
|
||||
blk_insert_flush(rq);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "%s: bad insertion point %d\n",
|
||||
__func__, where);
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(__elv_add_request);
|
||||
|
||||
void elv_add_request(struct request_queue *q, struct request *rq, int where)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
__elv_add_request(q, rq, where);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(elv_add_request);
|
||||
|
||||
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.next_request)
|
||||
if (e->type->ops.mq.next_request)
|
||||
return e->type->ops.mq.next_request(q, rq);
|
||||
else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
|
||||
return e->type->ops.sq.elevator_latter_req_fn(q, rq);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@ -640,68 +423,12 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (e->uses_mq && e->type->ops.mq.former_request)
|
||||
if (e->type->ops.mq.former_request)
|
||||
return e->type->ops.mq.former_request(q, rq);
|
||||
if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
|
||||
return e->type->ops.sq.elevator_former_req_fn(q, rq);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int elv_set_request(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio, gfp_t gfp_mask)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return 0;
|
||||
|
||||
if (e->type->ops.sq.elevator_set_req_fn)
|
||||
return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void elv_put_request(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return;
|
||||
|
||||
if (e->type->ops.sq.elevator_put_req_fn)
|
||||
e->type->ops.sq.elevator_put_req_fn(rq);
|
||||
}
|
||||
|
||||
int elv_may_queue(struct request_queue *q, unsigned int op)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return 0;
|
||||
|
||||
if (e->type->ops.sq.elevator_may_queue_fn)
|
||||
return e->type->ops.sq.elevator_may_queue_fn(q, op);
|
||||
|
||||
return ELV_MQUEUE_MAY;
|
||||
}
|
||||
|
||||
void elv_completed_request(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (WARN_ON_ONCE(e->uses_mq))
|
||||
return;
|
||||
|
||||
/*
|
||||
* request is released from the driver, io must be done
|
||||
*/
|
||||
if (blk_account_rq(rq)) {
|
||||
q->in_flight[rq_is_sync(rq)]--;
|
||||
if ((rq->rq_flags & RQF_SORTED) &&
|
||||
e->type->ops.sq.elevator_completed_req_fn)
|
||||
e->type->ops.sq.elevator_completed_req_fn(q, rq);
|
||||
}
|
||||
}
|
||||
|
||||
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
|
||||
|
||||
static ssize_t
|
||||
@ -768,8 +495,6 @@ int elv_register_queue(struct request_queue *q)
|
||||
}
|
||||
kobject_uevent(&e->kobj, KOBJ_ADD);
|
||||
e->registered = 1;
|
||||
if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
|
||||
e->type->ops.sq.elevator_registered_fn(q);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
@ -809,7 +534,7 @@ int elv_register(struct elevator_type *e)
|
||||
|
||||
/* register, don't allow duplicate names */
|
||||
spin_lock(&elv_list_lock);
|
||||
if (elevator_find(e->elevator_name, e->uses_mq)) {
|
||||
if (elevator_find(e->elevator_name)) {
|
||||
spin_unlock(&elv_list_lock);
|
||||
kmem_cache_destroy(e->icq_cache);
|
||||
return -EBUSY;
|
||||
@ -919,71 +644,17 @@ int elevator_init_mq(struct request_queue *q)
|
||||
*/
|
||||
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
|
||||
{
|
||||
struct elevator_queue *old = q->elevator;
|
||||
bool old_registered = false;
|
||||
int err;
|
||||
|
||||
lockdep_assert_held(&q->sysfs_lock);
|
||||
|
||||
if (q->mq_ops) {
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
|
||||
err = elevator_switch_mq(q, new_e);
|
||||
err = elevator_switch_mq(q, new_e);
|
||||
|
||||
blk_mq_unquiesce_queue(q);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Turn on BYPASS and drain all requests w/ elevator private data.
|
||||
* Block layer doesn't call into a quiesced elevator - all requests
|
||||
* are directly put on the dispatch list without elevator data
|
||||
* using INSERT_BACK. All requests have SOFTBARRIER set and no
|
||||
* merge happens either.
|
||||
*/
|
||||
if (old) {
|
||||
old_registered = old->registered;
|
||||
|
||||
blk_queue_bypass_start(q);
|
||||
|
||||
/* unregister and clear all auxiliary data of the old elevator */
|
||||
if (old_registered)
|
||||
elv_unregister_queue(q);
|
||||
|
||||
ioc_clear_queue(q);
|
||||
}
|
||||
|
||||
/* allocate, init and register new elevator */
|
||||
err = new_e->ops.sq.elevator_init_fn(q, new_e);
|
||||
if (err)
|
||||
goto fail_init;
|
||||
|
||||
err = elv_register_queue(q);
|
||||
if (err)
|
||||
goto fail_register;
|
||||
|
||||
/* done, kill the old one and finish */
|
||||
if (old) {
|
||||
elevator_exit(q, old);
|
||||
blk_queue_bypass_end(q);
|
||||
}
|
||||
|
||||
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_register:
|
||||
elevator_exit(q, q->elevator);
|
||||
fail_init:
|
||||
/* switch failed, restore and re-register old elevator */
|
||||
if (old) {
|
||||
q->elevator = old;
|
||||
elv_register_queue(q);
|
||||
blk_queue_bypass_end(q);
|
||||
}
|
||||
blk_mq_unquiesce_queue(q);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -1032,7 +703,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
|
||||
if (!q->mq_ops || !elv_support_iosched(q))
|
||||
return count;
|
||||
|
||||
ret = __elevator_change(q, name);
|
||||
@ -1047,7 +718,6 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
|
||||
struct elevator_queue *e = q->elevator;
|
||||
struct elevator_type *elv = NULL;
|
||||
struct elevator_type *__e;
|
||||
bool uses_mq = q->mq_ops != NULL;
|
||||
int len = 0;
|
||||
|
||||
if (!queue_is_rq_based(q))
|
||||
@ -1060,14 +730,11 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
|
||||
|
||||
spin_lock(&elv_list_lock);
|
||||
list_for_each_entry(__e, &elv_list, list) {
|
||||
if (elv && elevator_match(elv, __e->elevator_name) &&
|
||||
(__e->uses_mq == uses_mq)) {
|
||||
if (elv && elevator_match(elv, __e->elevator_name)) {
|
||||
len += sprintf(name+len, "[%s] ", elv->elevator_name);
|
||||
continue;
|
||||
}
|
||||
if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
|
||||
len += sprintf(name+len, "%s ", __e->elevator_name);
|
||||
else if (!__e->uses_mq && !q->mq_ops)
|
||||
if (elv_support_iosched(q))
|
||||
len += sprintf(name+len, "%s ", __e->elevator_name);
|
||||
}
|
||||
spin_unlock(&elv_list_lock);
|
||||
|
block/kyber-iosched.c
@@ -1032,7 +1032,6 @@ static struct elevator_type kyber_sched = {
                 .dispatch_request = kyber_dispatch_request,
                 .has_work = kyber_has_work,
         },
-        .uses_mq = true,
 #ifdef CONFIG_BLK_DEBUG_FS
         .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
         .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
block/mq-deadline.c
@@ -777,7 +777,6 @@ static struct elevator_type mq_deadline = {
                 .exit_sched = dd_exit_queue,
         },
 
-        .uses_mq = true,
 #ifdef CONFIG_BLK_DEBUG_FS
         .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
 #endif
@ -58,9 +58,6 @@ struct blk_stat_callback;
|
||||
|
||||
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
|
||||
|
||||
#define BLK_RL_SYNCFULL (1U << 0)
|
||||
#define BLK_RL_ASYNCFULL (1U << 1)
|
||||
|
||||
struct request_list {
|
||||
struct request_queue *q; /* the queue this rl belongs to */
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
@ -309,11 +306,8 @@ static inline unsigned short req_get_ioprio(struct request *req)
|
||||
|
||||
struct blk_queue_ctx;
|
||||
|
||||
typedef void (request_fn_proc) (struct request_queue *q);
|
||||
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
|
||||
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
|
||||
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
|
||||
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
|
||||
|
||||
struct bio_vec;
|
||||
typedef void (softirq_done_fn)(struct request *);
|
||||
@ -432,8 +426,6 @@ struct request_queue {
|
||||
struct list_head queue_head;
|
||||
struct request *last_merge;
|
||||
struct elevator_queue *elevator;
|
||||
int nr_rqs[2]; /* # allocated [a]sync rqs */
|
||||
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
|
||||
|
||||
struct blk_queue_stats *stats;
|
||||
struct rq_qos *rq_qos;
|
||||
@ -446,11 +438,8 @@ struct request_queue {
|
||||
*/
|
||||
struct request_list root_rl;
|
||||
|
||||
request_fn_proc *request_fn;
|
||||
make_request_fn *make_request_fn;
|
||||
poll_q_fn *poll_fn;
|
||||
prep_rq_fn *prep_rq_fn;
|
||||
unprep_rq_fn *unprep_rq_fn;
|
||||
softirq_done_fn *softirq_done_fn;
|
||||
rq_timed_out_fn *rq_timed_out_fn;
|
||||
dma_drain_needed_fn *dma_drain_needed;
|
||||
@ -458,8 +447,6 @@ struct request_queue {
|
||||
init_rq_fn *init_rq_fn;
|
||||
/* Called just before a request is freed */
|
||||
exit_rq_fn *exit_rq_fn;
|
||||
/* Called from inside blk_get_request() */
|
||||
void (*initialize_rq_fn)(struct request *rq);
|
||||
|
||||
const struct blk_mq_ops *mq_ops;
|
||||
|
||||
@ -475,17 +462,6 @@ struct request_queue {
|
||||
struct blk_mq_hw_ctx **queue_hw_ctx;
|
||||
unsigned int nr_hw_queues;
|
||||
|
||||
/*
|
||||
* Dispatch queue sorting
|
||||
*/
|
||||
sector_t end_sector;
|
||||
struct request *boundary_rq;
|
||||
|
||||
/*
|
||||
* Delayed queue handling
|
||||
*/
|
||||
struct delayed_work delay_work;
|
||||
|
||||
struct backing_dev_info *backing_dev_info;
|
||||
|
||||
/*
|
||||
@ -548,9 +524,6 @@ struct request_queue {
|
||||
* queue settings
|
||||
*/
|
||||
unsigned long nr_requests; /* Max # of requests */
|
||||
unsigned int nr_congestion_on;
|
||||
unsigned int nr_congestion_off;
|
||||
unsigned int nr_batching;
|
||||
|
||||
unsigned int dma_drain_size;
|
||||
void *dma_drain_buffer;
|
||||
@ -560,13 +533,6 @@ struct request_queue {
|
||||
unsigned int nr_sorted;
|
||||
unsigned int in_flight[2];
|
||||
|
||||
/*
|
||||
* Number of active block driver functions for which blk_drain_queue()
|
||||
* must wait. Must be incremented around functions that unlock the
|
||||
* queue_lock internally, e.g. scsi_request_fn().
|
||||
*/
|
||||
unsigned int request_fn_active;
|
||||
|
||||
unsigned int rq_timeout;
|
||||
int poll_nsec;
|
||||
|
||||
@ -740,11 +706,6 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
|
||||
extern void blk_set_pm_only(struct request_queue *q);
|
||||
extern void blk_clear_pm_only(struct request_queue *q);
|
||||
|
||||
static inline int queue_in_flight(struct request_queue *q)
|
||||
{
|
||||
return q->in_flight[0] + q->in_flight[1];
|
||||
}
|
||||
|
||||
static inline bool blk_account_rq(struct request *rq)
|
||||
{
|
||||
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
|
||||
@ -765,7 +726,7 @@ static inline bool blk_account_rq(struct request *rq)
|
||||
*/
|
||||
static inline bool queue_is_rq_based(struct request_queue *q)
|
||||
{
|
||||
return q->request_fn || q->mq_ops;
|
||||
return q->mq_ops;
|
||||
}
|
||||
|
||||
static inline unsigned int blk_queue_cluster(struct request_queue *q)
|
||||
@ -828,27 +789,6 @@ static inline bool rq_is_sync(struct request *rq)
|
||||
return op_is_sync(rq->cmd_flags);
|
||||
}
|
||||
|
||||
static inline bool blk_rl_full(struct request_list *rl, bool sync)
|
||||
{
|
||||
unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
|
||||
|
||||
return rl->flags & flag;
|
||||
}
|
||||
|
||||
static inline void blk_set_rl_full(struct request_list *rl, bool sync)
|
||||
{
|
||||
unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
|
||||
|
||||
rl->flags |= flag;
|
||||
}
|
||||
|
||||
static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
|
||||
{
|
||||
unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
|
||||
|
||||
rl->flags &= ~flag;
|
||||
}
|
||||
|
||||
static inline bool rq_mergeable(struct request *rq)
|
||||
{
|
||||
if (blk_rq_is_passthrough(rq))
|
||||
@ -969,7 +909,6 @@ extern void blk_put_request(struct request *);
|
||||
extern void __blk_put_request(struct request_queue *, struct request *);
|
||||
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
|
||||
blk_mq_req_flags_t flags);
|
||||
extern void blk_requeue_request(struct request_queue *, struct request *);
|
||||
extern int blk_lld_busy(struct request_queue *q);
|
||||
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
|
||||
struct bio_set *bs, gfp_t gfp_mask,
|
||||
@ -979,7 +918,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
|
||||
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
|
||||
struct request *rq);
|
||||
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
|
||||
extern void blk_delay_queue(struct request_queue *, unsigned long);
|
||||
extern void blk_queue_split(struct request_queue *, struct bio **);
|
||||
extern void blk_recount_segments(struct request_queue *, struct bio *);
|
||||
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
|
||||
@ -992,15 +930,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
|
||||
|
||||
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
|
||||
extern void blk_queue_exit(struct request_queue *q);
|
||||
extern void blk_start_queue(struct request_queue *q);
|
||||
extern void blk_start_queue_async(struct request_queue *q);
|
||||
extern void blk_stop_queue(struct request_queue *q);
|
||||
extern void blk_sync_queue(struct request_queue *q);
|
||||
extern void __blk_stop_queue(struct request_queue *q);
|
||||
extern void __blk_run_queue(struct request_queue *q);
|
||||
extern void __blk_run_queue_uncond(struct request_queue *q);
|
||||
extern void blk_run_queue(struct request_queue *);
|
||||
extern void blk_run_queue_async(struct request_queue *q);
|
||||
extern int blk_rq_map_user(struct request_queue *, struct request *,
|
||||
struct rq_map_data *, void __user *, unsigned long,
|
||||
gfp_t);
|
||||
@ -1155,13 +1085,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
|
||||
return nr_bios;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request issue related functions.
|
||||
*/
|
||||
extern struct request *blk_peek_request(struct request_queue *q);
|
||||
extern void blk_start_request(struct request *rq);
|
||||
extern struct request *blk_fetch_request(struct request_queue *q);
|
||||
|
||||
void blk_steal_bios(struct bio_list *list, struct request *rq);
|
||||
|
||||
/*
|
||||
@ -1179,9 +1102,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
|
||||
*/
|
||||
extern bool blk_update_request(struct request *rq, blk_status_t error,
|
||||
unsigned int nr_bytes);
|
||||
extern void blk_finish_request(struct request *rq, blk_status_t error);
|
||||
extern bool blk_end_request(struct request *rq, blk_status_t error,
|
||||
unsigned int nr_bytes);
|
||||
extern void blk_end_request_all(struct request *rq, blk_status_t error);
|
||||
extern bool __blk_end_request(struct request *rq, blk_status_t error,
|
||||
unsigned int nr_bytes);
|
||||
@ -1190,15 +1110,10 @@ extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
|
||||
|
||||
extern void __blk_complete_request(struct request *);
|
||||
extern void blk_abort_request(struct request *);
|
||||
extern void blk_unprep_request(struct request *);
|
||||
|
||||
/*
|
||||
* Access functions for manipulating queue properties
|
||||
*/
|
||||
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
|
||||
spinlock_t *lock, int node_id);
|
||||
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
|
||||
extern int blk_init_allocated_queue(struct request_queue *);
|
||||
extern void blk_cleanup_queue(struct request_queue *);
|
||||
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
|
||||
extern void blk_queue_bounce_limit(struct request_queue *, u64);
|
||||
@ -1239,8 +1154,6 @@ extern int blk_queue_dma_drain(struct request_queue *q,
|
||||
void *buf, unsigned int size);
|
||||
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
|
||||
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
|
||||
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
|
||||
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
|
||||
extern void blk_queue_dma_alignment(struct request_queue *, int);
|
||||
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
|
||||
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
|
||||
@ -1298,7 +1211,6 @@ extern void blk_set_queue_dying(struct request_queue *);
|
||||
* schedule() where blk_schedule_flush_plug() is called.
|
||||
*/
|
||||
struct blk_plug {
|
||||
struct list_head list; /* requests */
|
||||
struct list_head mq_list; /* blk-mq requests */
|
||||
struct list_head cb_list; /* md requires an unplug callback */
|
||||
};
|
||||
@ -1339,8 +1251,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
|
||||
struct blk_plug *plug = tsk->plug;
|
||||
|
||||
return plug &&
|
||||
(!list_empty(&plug->list) ||
|
||||
!list_empty(&plug->mq_list) ||
|
||||
(!list_empty(&plug->mq_list) ||
|
||||
!list_empty(&plug->cb_list));
|
||||
}
|
||||
|
||||
|
@ -23,74 +23,6 @@ enum elv_merge {
|
||||
ELEVATOR_DISCARD_MERGE = 3,
|
||||
};
|
||||
|
||||
typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
|
||||
struct bio *);
|
||||
|
||||
typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
|
||||
|
||||
typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
|
||||
|
||||
typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
|
||||
struct request *, struct bio *);
|
||||
|
||||
typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
|
||||
struct request *, struct request *);
|
||||
|
||||
typedef void (elevator_bio_merged_fn) (struct request_queue *,
|
||||
struct request *, struct bio *);
|
||||
|
||||
typedef int (elevator_dispatch_fn) (struct request_queue *, int);
|
||||
|
||||
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
|
||||
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
|
||||
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
|
||||
typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
|
||||
|
||||
typedef void (elevator_init_icq_fn) (struct io_cq *);
|
||||
typedef void (elevator_exit_icq_fn) (struct io_cq *);
|
||||
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
|
||||
struct bio *, gfp_t);
|
||||
typedef void (elevator_put_req_fn) (struct request *);
|
||||
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
|
||||
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
|
||||
|
||||
typedef int (elevator_init_fn) (struct request_queue *,
|
||||
struct elevator_type *e);
|
||||
typedef void (elevator_exit_fn) (struct elevator_queue *);
|
||||
typedef void (elevator_registered_fn) (struct request_queue *);
|
||||
|
||||
struct elevator_ops
|
||||
{
|
||||
elevator_merge_fn *elevator_merge_fn;
|
||||
elevator_merged_fn *elevator_merged_fn;
|
||||
elevator_merge_req_fn *elevator_merge_req_fn;
|
||||
elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
|
||||
elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
|
||||
elevator_bio_merged_fn *elevator_bio_merged_fn;
|
||||
|
||||
elevator_dispatch_fn *elevator_dispatch_fn;
|
||||
elevator_add_req_fn *elevator_add_req_fn;
|
||||
elevator_activate_req_fn *elevator_activate_req_fn;
|
||||
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
|
||||
|
||||
elevator_completed_req_fn *elevator_completed_req_fn;
|
||||
|
||||
elevator_request_list_fn *elevator_former_req_fn;
|
||||
elevator_request_list_fn *elevator_latter_req_fn;
|
||||
|
||||
elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
|
||||
elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
|
||||
|
||||
elevator_set_req_fn *elevator_set_req_fn;
|
||||
elevator_put_req_fn *elevator_put_req_fn;
|
||||
|
||||
elevator_may_queue_fn *elevator_may_queue_fn;
|
||||
|
||||
elevator_init_fn *elevator_init_fn;
|
||||
elevator_exit_fn *elevator_exit_fn;
|
||||
elevator_registered_fn *elevator_registered_fn;
|
||||
};
|
||||
|
||||
struct blk_mq_alloc_data;
|
||||
struct blk_mq_hw_ctx;
|
||||
|
||||
@ -138,16 +70,15 @@ struct elevator_type
|
||||
|
||||
/* fields provided by elevator implementation */
|
||||
union {
|
||||
struct elevator_ops sq;
|
||||
struct elevator_mq_ops mq;
|
||||
} ops;
|
||||
|
||||
size_t icq_size; /* see iocontext.h */
|
||||
size_t icq_align; /* ditto */
|
||||
struct elv_fs_entry *elevator_attrs;
|
||||
char elevator_name[ELV_NAME_MAX];
|
||||
const char *elevator_alias;
|
||||
struct module *elevator_owner;
|
||||
bool uses_mq;
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
|
||||
const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
|
||||
@ -175,40 +106,25 @@ struct elevator_queue
|
||||
struct kobject kobj;
|
||||
struct mutex sysfs_lock;
|
||||
unsigned int registered:1;
|
||||
unsigned int uses_mq:1;
|
||||
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
|
||||
};
|
||||
|
||||
/*
|
||||
* block elevator interface
|
||||
*/
|
||||
extern void elv_dispatch_sort(struct request_queue *, struct request *);
|
||||
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
|
||||
extern void elv_add_request(struct request_queue *, struct request *, int);
|
||||
extern void __elv_add_request(struct request_queue *, struct request *, int);
|
||||
extern enum elv_merge elv_merge(struct request_queue *, struct request **,
|
||||
struct bio *);
|
||||
extern void elv_merge_requests(struct request_queue *, struct request *,
|
||||
struct request *);
|
||||
extern void elv_merged_request(struct request_queue *, struct request *,
|
||||
enum elv_merge);
|
||||
extern void elv_bio_merged(struct request_queue *q, struct request *,
|
||||
struct bio *);
|
||||
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
|
||||
extern void elv_requeue_request(struct request_queue *, struct request *);
|
||||
extern struct request *elv_former_request(struct request_queue *, struct request *);
|
||||
extern struct request *elv_latter_request(struct request_queue *, struct request *);
|
||||
extern int elv_may_queue(struct request_queue *, unsigned int);
|
||||
extern void elv_completed_request(struct request_queue *, struct request *);
|
||||
extern int elv_set_request(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio, gfp_t gfp_mask);
|
||||
extern void elv_put_request(struct request_queue *, struct request *);
|
||||
extern void elv_drain_elevator(struct request_queue *);
|
||||
|
||||
/*
|
||||
* io scheduler registration
|
||||
*/
|
||||
extern void __init load_default_elevator_module(void);
|
||||
extern int elv_register(struct elevator_type *);
|
||||
extern void elv_unregister(struct elevator_type *);
|
||||
|
||||
@ -260,9 +176,5 @@ enum {
|
||||
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
|
||||
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
|
||||
|
||||
#else /* CONFIG_BLOCK */
|
||||
|
||||
static inline void load_default_elevator_module(void) { }
|
||||
|
||||
#endif /* CONFIG_BLOCK */
|
||||
#endif
|
||||
|
include/linux/init.h
@@ -146,7 +146,6 @@ extern unsigned int reset_devices;
 /* used by init/main.c */
 void setup_arch(char **);
 void prepare_namespace(void);
-void __init load_default_modules(void);
 int __init init_rootfs(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
init/do_mounts_initrd.c
@@ -53,9 +53,6 @@ static void __init handle_initrd(void)
         ksys_mkdir("/old", 0700);
         ksys_chdir("/old");
 
-        /* try loading default modules from initrd */
-        load_default_modules();
-
         /*
          * In case that a resume from disk is carried out by linuxrc or one of
          * its children, we need to tell the freezer not to wait for us.
init/initramfs.c
@@ -644,12 +644,6 @@ static int __init populate_rootfs(void)
 #endif
         }
         flush_delayed_fput();
-        /*
-         * Try loading default modules from initramfs. This gives
-         * us a chance to load before device_initcalls.
-         */
-        load_default_modules();
-
         return 0;
 }
 rootfs_initcall(populate_rootfs);
init/main.c (12 lines changed)
@@ -996,17 +996,6 @@ static void __init do_pre_smp_initcalls(void)
                 do_one_initcall(initcall_from_entry(fn));
 }
 
-/*
- * This function requests modules which should be loaded by default and is
- * called twice right after initrd is mounted and right before init is
- * exec'd. If such modules are on either initrd or rootfs, they will be
- * loaded before control is passed to userland.
- */
-void __init load_default_modules(void)
-{
-        load_default_elevator_module();
-}
-
 static int run_init_process(const char *init_filename)
 {
         argv_init[0] = init_filename;
@@ -1180,5 +1169,4 @@ static noinline void __init kernel_init_freeable(void)
          */
 
         integrity_load_keys();
-        load_default_modules();
 }