2019-04-30 18:42:39 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2017-01-25 16:06:40 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2017 Facebook
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
|
2017-01-31 22:53:20 +00:00
|
|
|
#include "blk.h"
|
2017-01-25 16:06:40 +00:00
|
|
|
#include "blk-mq.h"
|
2017-05-04 07:31:30 +00:00
|
|
|
#include "blk-mq-debugfs.h"
|
2021-11-23 18:53:08 +00:00
|
|
|
#include "blk-mq-sched.h"
|
2018-12-17 01:46:00 +00:00
|
|
|
#include "blk-rq-qos.h"
|
2017-01-25 16:06:40 +00:00
|
|
|
|
2018-02-28 00:32:13 +00:00
|
|
|
/*
 * "poll_stat" read handler: intentionally emits nothing. The file appears to
 * be kept only so the debugfs entry still exists — presumably the statistics
 * it once printed were removed; confirm against blk-stat history.
 */
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * seq_file iterator over q->requeue_list for the "requeue_list" debugfs file.
 * The requeue_lock is held (irqs off) from ->start to ->stop, so the list is
 * stable while a read is in progress.
 */
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

/* Each element is printed by the shared request formatter. */
static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
|
|
|
|
|
2017-04-10 22:13:15 +00:00
|
|
|
/*
 * Print the set bits of @flags as a '|'-separated list. Bits that have an
 * entry in @flag_name are printed symbolically; any other set bit is printed
 * as its decimal bit number, so new/unknown flags are still visible.
 */
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
|
|
|
|
|
2018-09-26 21:01:04 +00:00
|
|
|
/* "pm_only" read handler: print the queue's pm_only counter. */
static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}
|
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/* Symbolic names for QUEUE_FLAG_* bits, indexed by bit number. */
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SYNCHRONOUS),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
	QUEUE_FLAG_NAME(SQ_SCHED),
	QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
};
#undef QUEUE_FLAG_NAME
|
2017-04-10 22:13:15 +00:00
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* "state" read handler: decode q->queue_flags via blk_flags_show(). */
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/*
 * "state" write handler: accept one of the commands "run", "start" or "kick"
 * and trigger the corresponding blk-mq action on the queue. Anything else
 * (including an over-long write) fails with -EINVAL after logging a hint.
 * Note the "too long" path jumps into the final else branch via the 'inval'
 * label so both error paths share the usage message.
 */
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	/* opbuf was zero-initialized, so the copy is always NUL-terminated. */
	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
|
|
|
|
|
2018-02-28 00:32:13 +00:00
|
|
|
/* Per-queue debugfs files; terminated by the empty sentinel entry. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};
|
blk-stat: convert to callback-based statistics reporting
Currently, statistics are gathered in ~0.13s windows, and users grab the
statistics whenever they need them. This is not ideal for both in-tree
users:
1. Writeback throttling wants its own dynamically sized window of
statistics. Since the blk-stats statistics are reset after every
window and the wbt windows don't line up with the blk-stats windows,
wbt doesn't see every I/O.
2. Polling currently grabs the statistics on every I/O. Again, depending
on how the window lines up, we may miss some I/Os. It's also
unnecessary overhead to get the statistics on every I/O; the hybrid
polling heuristic would be just as happy with the statistics from the
previous full window.
This reworks the blk-stats infrastructure to be callback-based: users
register a callback that they want called at a given time with all of
the statistics from the window during which the callback was active.
Users can dynamically bucketize the statistics. wbt and polling both
currently use read vs. write, but polling can be extended to further
subdivide based on request size.
The callbacks are kept on an RCU list, and each callback has percpu
stats buffers. There will only be a few users, so the overhead on the
I/O completion side is low. The stats flushing is also simplified
considerably: since the timer function is responsible for clearing the
statistics, we don't have to worry about stale statistics.
wbt is a trivial conversion. After the conversion, the windowing problem
mentioned above is fixed.
For polling, we register an extra callback that caches the previous
window's statistics in the struct request_queue for the hybrid polling
heuristic to use.
Since we no longer have a single stats buffer for the request queue,
this also removes the sysfs and debugfs stats entries. To replace those,
we add a debugfs entry for the poll statistics.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
2017-03-21 15:56:08 +00:00
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/* Symbolic names for BLK_MQ_S_* hardware-context state bits. */
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "state" read handler: decode hctx->state bit names. */
static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}
|
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/* Names for the BLK_TAG_ALLOC_* tag allocation policies. */
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
|
2017-03-30 18:21:27 +00:00
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/*
 * Names for BLK_MQ_F_* flags, indexed by bit position (ilog2 of the flag
 * value, since the flags are single-bit masks).
 */
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME
|
2017-03-30 18:21:27 +00:00
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/*
 * Per-hctx "flags" read handler: print the tag allocation policy by name
 * (or number if unknown), then the remaining flag bits. The policy bits are
 * XORed out of hctx->flags first so they are not double-reported.
 */
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
|
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/* Names for __REQ_* command flag bits, indexed by bit number. */
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME
|
2017-04-26 20:47:55 +00:00
|
|
|
|
2017-05-04 07:31:24 +00:00
|
|
|
/* Names for RQF_* request flag bits, indexed by bit position. */
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(ELV),
	RQF_NAME(RESV),
};
#undef RQF_NAME
|
2017-04-26 20:47:55 +00:00
|
|
|
|
2018-03-16 17:31:11 +00:00
|
|
|
static const char *const blk_mq_rq_state_name_array[] = {
|
|
|
|
[MQ_RQ_IDLE] = "idle",
|
|
|
|
[MQ_RQ_IN_FLIGHT] = "in_flight",
|
|
|
|
[MQ_RQ_COMPLETE] = "complete",
|
|
|
|
};
|
|
|
|
|
|
|
|
static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
|
|
|
|
{
|
2018-06-20 10:45:05 +00:00
|
|
|
if (WARN_ON_ONCE((unsigned int)rq_state >=
|
2018-03-16 17:31:11 +00:00
|
|
|
ARRAY_SIZE(blk_mq_rq_state_name_array)))
|
|
|
|
return "(?)";
|
|
|
|
return blk_mq_rq_state_name_array[rq_state];
|
|
|
|
}
|
|
|
|
|
2017-05-04 07:31:34 +00:00
|
|
|
/*
 * Format one request for debugfs output: pointer, operation name (numeric if
 * blk_op_str() doesn't know it), command and request flags, state and tags.
 * Drivers can append their own details via the optional ->show_rq() hook.
 */
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	/* Mask off the op bits — the op was already printed by name above. */
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
|
|
|
|
|
|
|
|
/* seq_file ->show adapter: v is a queuelist list_head, convert and print. */
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
|
2017-01-25 16:06:42 +00:00
|
|
|
|
|
|
|
/*
 * seq_file iterator over hctx->dispatch for the per-hctx "dispatch" file.
 * hctx->lock is held across the whole read to keep the list stable.
 */
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
|
|
|
|
|
2017-06-01 15:55:12 +00:00
|
|
|
/* Carries the output file and the hctx being dumped through the tag iterator. */
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	/* The tagset iterator visits all hctxs; only print ours. */
	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Per-hctx "busy" read handler: walk all tags in the tagset and print each
 * request that belongs to this hctx.
 */
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}
|
|
|
|
|
2018-12-17 10:42:48 +00:00
|
|
|
/* Names for the HCTX_TYPE_* hardware queue types. */
static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

/* Per-hctx "type" read handler: print this hctx's queue type by name. */
static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	/* Keep the name table in sync with the enum at compile time. */
	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "ctx_map" read handler: dump the pending-ctx bitmap. */
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
|
|
|
|
|
2017-01-25 16:06:46 +00:00
|
|
|
/*
 * Common formatter for a blk_mq_tags set: counts, active queues, and the
 * normal and (if present) reserved tag bitmaps. Shared by the "tags" and
 * "sched_tags" files.
 */
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/*
 * Per-hctx "tags" read handler. sysfs_lock is taken interruptibly so a
 * reader blocked on it can be killed; it protects against hctx->tags being
 * torn down concurrently (presumably during queue reconfiguration — the
 * update side is outside this file).
 */
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "tags_bitmap" read handler: raw word dump of the tag bitmap. */
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	/* Interruptible lock: see hctx_tags_show() for the rationale. */
	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "sched_tags" read handler: scheduler tag set summary. */
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	/* Interruptible lock: see hctx_tags_show() for the rationale. */
	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "sched_tags_bitmap" read handler: raw scheduler tag bitmap. */
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	/* Interruptible lock: see hctx_tags_show() for the rationale. */
	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "run" read handler: print the hctx run counter. */
static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

/* Writing anything to "run" resets the counter; input bytes are ignored. */
static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Per-hctx "active" read handler: print the number of active requests. */
static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}
|
|
|
|
|
2018-07-03 15:03:16 +00:00
|
|
|
/* Per-hctx "dispatch_busy" read handler: print the dispatch_busy estimate. */
static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
|
|
|
|
|
2018-12-17 15:44:05 +00:00
|
|
|
/*
 * Generate the seq_file start/next/stop triple plus the seq_operations table
 * for one per-ctx request list (ctx->rq_lists[type]), holding ctx->lock for
 * the duration of the read. Instantiated once per HCTX_TYPE_* below.
 */
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
|
2017-01-25 16:06:42 +00:00
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/*
 * single_open ->show adapter: the attribute descriptor lives in m->private
 * and the object being shown (queue/hctx/ctx) in the parent directory's
 * inode private data (set by debugfs_create_files()).
 */
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}
|
|
|
|
|
|
|
|
/*
 * Common write handler: route the write to the attribute's ->write callback,
 * rejecting writes on attributes that have none.
 */
static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same with 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}
|
|
|
|
|
|
|
|
/*
 * Common open handler: seq_ops-based attributes get a full seq_open() with
 * the target object stashed in m->private; plain ->show attributes use
 * single_open() with the attribute descriptor as private data (see
 * blk_mq_debugfs_show()).
 */
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}
|
|
|
|
|
2017-05-04 07:31:28 +00:00
|
|
|
/* Release with the teardown matching how the file was opened (see open). */
static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}
|
|
|
|
|
2017-08-17 23:23:04 +00:00
|
|
|
/* Shared file_operations for every blk-mq debugfs attribute file. */
static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
|
|
|
|
|
2017-01-25 16:06:40 +00:00
|
|
|
/* Per-hardware-context debugfs files; terminated by the empty sentinel. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};
|
|
|
|
|
|
|
|
/* Per-software-context (per-CPU) debugfs files. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
/*
 * Create one debugfs file per entry of the NULL-name-terminated @attr table
 * under @parent, stashing @data in the directory inode so the open/show
 * handlers can retrieve the object they operate on.
 */
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register(struct request_queue *q)
|
2017-01-25 16:06:40 +00:00
|
|
|
{
|
2017-05-04 14:17:21 +00:00
|
|
|
struct blk_mq_hw_ctx *hctx;
|
2022-03-08 07:32:18 +00:00
|
|
|
unsigned long i;
|
2017-05-04 14:17:21 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
|
2017-01-25 16:06:40 +00:00
|
|
|
|
2017-05-04 14:17:21 +00:00
|
|
|
/*
|
2017-10-03 21:57:16 +00:00
|
|
|
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
|
2017-05-04 14:17:21 +00:00
|
|
|
* didn't exist yet (because we don't know what to name the directory
|
|
|
|
* until the queue is registered to a gendisk).
|
|
|
|
*/
|
2017-10-03 21:57:16 +00:00
|
|
|
if (q->elevator && !q->sched_debugfs_dir)
|
|
|
|
blk_mq_debugfs_register_sched(q);
|
|
|
|
|
|
|
|
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
|
2017-05-04 14:17:21 +00:00
|
|
|
queue_for_each_hw_ctx(q, hctx, i) {
|
2019-06-12 12:30:19 +00:00
|
|
|
if (!hctx->debugfs_dir)
|
|
|
|
blk_mq_debugfs_register_hctx(q, hctx);
|
|
|
|
if (q->elevator && !hctx->sched_debugfs_dir)
|
|
|
|
blk_mq_debugfs_register_sched_hctx(q, hctx);
|
2017-05-04 14:17:21 +00:00
|
|
|
}
|
|
|
|
|
2018-12-17 01:46:00 +00:00
|
|
|
if (q->rq_qos) {
|
|
|
|
struct rq_qos *rqos = q->rq_qos;
|
|
|
|
|
|
|
|
while (rqos) {
|
|
|
|
blk_mq_debugfs_register_rqos(rqos);
|
|
|
|
rqos = rqos->next;
|
|
|
|
}
|
|
|
|
}
|
2017-01-25 16:06:40 +00:00
|
|
|
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
/* Create the "cpuN" subdirectory for one software context under its hctx. */
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register_hctx(struct request_queue *q,
|
|
|
|
struct blk_mq_hw_ctx *hctx)
|
2017-01-25 16:06:40 +00:00
|
|
|
{
|
|
|
|
struct blk_mq_ctx *ctx;
|
|
|
|
char name[20];
|
|
|
|
int i;
|
|
|
|
|
2022-07-11 09:08:08 +00:00
|
|
|
if (!q->debugfs_dir)
|
|
|
|
return;
|
|
|
|
|
2017-05-04 07:31:27 +00:00
|
|
|
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
|
2017-05-04 14:17:21 +00:00
|
|
|
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
|
2017-01-25 16:06:40 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
|
2017-05-04 14:17:21 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
hctx_for_each_ctx(hctx, ctx, i)
|
|
|
|
blk_mq_debugfs_register_ctx(hctx, ctx);
|
2017-05-04 14:17:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
|
|
|
{
|
2022-06-14 07:48:25 +00:00
|
|
|
if (!hctx->queue->debugfs_dir)
|
|
|
|
return;
|
2017-05-04 14:17:21 +00:00
|
|
|
debugfs_remove_recursive(hctx->debugfs_dir);
|
2017-05-04 14:24:40 +00:00
|
|
|
hctx->sched_debugfs_dir = NULL;
|
2017-05-04 14:17:21 +00:00
|
|
|
hctx->debugfs_dir = NULL;
|
2017-01-25 16:06:40 +00:00
|
|
|
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
2017-01-25 16:06:40 +00:00
|
|
|
{
|
|
|
|
struct blk_mq_hw_ctx *hctx;
|
2022-03-08 07:32:18 +00:00
|
|
|
unsigned long i;
|
2017-01-25 16:06:40 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
queue_for_each_hw_ctx(q, hctx, i)
|
|
|
|
blk_mq_debugfs_register_hctx(q, hctx);
|
2017-01-25 16:06:40 +00:00
|
|
|
}
|
|
|
|
|
2017-05-04 14:17:21 +00:00
|
|
|
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
2017-01-25 16:06:40 +00:00
|
|
|
{
|
2017-05-04 14:17:21 +00:00
|
|
|
struct blk_mq_hw_ctx *hctx;
|
2022-03-08 07:32:18 +00:00
|
|
|
unsigned long i;
|
2017-05-04 14:17:21 +00:00
|
|
|
|
|
|
|
queue_for_each_hw_ctx(q, hctx, i)
|
|
|
|
blk_mq_debugfs_unregister_hctx(hctx);
|
2017-01-25 16:06:40 +00:00
|
|
|
}
|
2017-05-04 14:24:40 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register_sched(struct request_queue *q)
|
2017-05-04 14:24:40 +00:00
|
|
|
{
|
|
|
|
struct elevator_type *e = q->elevator->type;
|
|
|
|
|
2022-06-14 07:48:25 +00:00
|
|
|
lockdep_assert_held(&q->debugfs_mutex);
|
|
|
|
|
2019-07-06 15:50:32 +00:00
|
|
|
/*
|
|
|
|
* If the parent directory has not been created yet, return, we will be
|
|
|
|
* called again later on and the directory/files will be created then.
|
|
|
|
*/
|
|
|
|
if (!q->debugfs_dir)
|
|
|
|
return;
|
|
|
|
|
2017-05-04 14:24:40 +00:00
|
|
|
if (!e->queue_debugfs_attrs)
|
2019-06-12 12:30:19 +00:00
|
|
|
return;
|
2017-05-04 14:24:40 +00:00
|
|
|
|
|
|
|
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
|
2017-05-04 14:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
|
|
|
{
|
2022-06-14 07:48:25 +00:00
|
|
|
lockdep_assert_held(&q->debugfs_mutex);
|
|
|
|
|
2017-05-04 14:24:40 +00:00
|
|
|
debugfs_remove_recursive(q->sched_debugfs_dir);
|
|
|
|
q->sched_debugfs_dir = NULL;
|
|
|
|
}
|
|
|
|
|
2021-06-18 00:44:43 +00:00
|
|
|
/*
 * Map an rq_qos policy id to the debugfs directory name used for it.
 *
 * Deliberately no "default:" label inside the switch: this way the compiler
 * (-Wswitch) warns when a new rq_qos_id enumerator is added without a name
 * here, while the trailing return still keeps unknown ids harmless.
 */
static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}
|
|
|
|
|
2018-12-17 01:46:00 +00:00
|
|
|
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
|
|
|
|
{
|
2023-02-03 15:03:56 +00:00
|
|
|
lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);
|
2022-06-14 07:48:25 +00:00
|
|
|
|
2023-02-03 15:03:56 +00:00
|
|
|
if (!rqos->disk->queue->debugfs_dir)
|
2022-06-14 07:48:25 +00:00
|
|
|
return;
|
2018-12-17 01:46:00 +00:00
|
|
|
debugfs_remove_recursive(rqos->debugfs_dir);
|
|
|
|
rqos->debugfs_dir = NULL;
|
|
|
|
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
|
2018-12-17 01:46:00 +00:00
|
|
|
{
|
2023-02-03 15:03:56 +00:00
|
|
|
struct request_queue *q = rqos->disk->queue;
|
2018-12-17 01:46:00 +00:00
|
|
|
const char *dir_name = rq_qos_id_to_name(rqos->id);
|
|
|
|
|
2022-06-14 07:48:25 +00:00
|
|
|
lockdep_assert_held(&q->debugfs_mutex);
|
|
|
|
|
2018-12-17 01:46:00 +00:00
|
|
|
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
|
2019-06-12 12:30:19 +00:00
|
|
|
return;
|
2018-12-17 01:46:00 +00:00
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
if (!q->rqos_debugfs_dir)
|
2018-12-17 01:46:00 +00:00
|
|
|
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
|
|
|
|
q->debugfs_dir);
|
|
|
|
|
2023-02-03 15:03:56 +00:00
|
|
|
rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
|
2019-06-12 12:30:19 +00:00
|
|
|
debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
|
2018-12-17 01:46:00 +00:00
|
|
|
}
|
|
|
|
|
2019-06-12 12:30:19 +00:00
|
|
|
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
|
|
|
struct blk_mq_hw_ctx *hctx)
|
2017-05-04 14:24:40 +00:00
|
|
|
{
|
|
|
|
struct elevator_type *e = q->elevator->type;
|
|
|
|
|
2022-06-14 07:48:25 +00:00
|
|
|
lockdep_assert_held(&q->debugfs_mutex);
|
|
|
|
|
2021-04-07 17:59:58 +00:00
|
|
|
/*
|
|
|
|
* If the parent debugfs directory has not been created yet, return;
|
|
|
|
* We will be called again later on with appropriate parent debugfs
|
|
|
|
* directory from blk_register_queue()
|
|
|
|
*/
|
|
|
|
if (!hctx->debugfs_dir)
|
|
|
|
return;
|
|
|
|
|
2017-05-04 14:24:40 +00:00
|
|
|
if (!e->hctx_debugfs_attrs)
|
2019-06-12 12:30:19 +00:00
|
|
|
return;
|
2017-05-04 14:24:40 +00:00
|
|
|
|
|
|
|
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
|
|
|
|
hctx->debugfs_dir);
|
2019-06-12 12:30:19 +00:00
|
|
|
debugfs_create_files(hctx->sched_debugfs_dir, hctx,
|
|
|
|
e->hctx_debugfs_attrs);
|
2017-05-04 14:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
|
|
|
{
|
2022-06-14 07:48:25 +00:00
|
|
|
lockdep_assert_held(&hctx->queue->debugfs_mutex);
|
|
|
|
|
|
|
|
if (!hctx->queue->debugfs_dir)
|
|
|
|
return;
|
2017-05-04 14:24:40 +00:00
|
|
|
debugfs_remove_recursive(hctx->sched_debugfs_dir);
|
|
|
|
hctx->sched_debugfs_dir = NULL;
|
|
|
|
}
|