GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-debugfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */
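
/*
 * blk-mq debugfs support: exposes request_queue, hardware-context (hctx)
 * and per-CPU software-context (ctx) state under <debugfs>/block/<disk>/.
 */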

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/build_bug.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

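/*
 * "poll_stat" used to report hybrid-polling latency statistics; that
 * reporting was removed, so the file now shows nothing. The attribute is
 * presumably kept as a stub so the debugfs layout stays stable.
 */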
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

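/*
 * Print the names of the bits set in @flags, "|"-separated. Bits without
 * an entry in @flag_name fall back to their numeric position.
 */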
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

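/*
 * The flag-name tables below use designated initializers indexed by bit
 * number; the BUILD_BUG_ON() checks in the corresponding show functions
 * catch tables that fall out of sync with the flag definitions.
 */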
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
	QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
	QUEUE_FLAG_NAME(NO_ELV_SWITCH),
	QUEUE_FLAG_NAME(QOS_ENABLED),
	QUEUE_FLAG_NAME(BIO_ISSUE_TIME),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

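/*
 * Writing "run", "start" or "kick" to the "state" attribute pokes the
 * queue: rerun the hardware queues, restart stopped ones, or kick the
 * requeue list. Useful when diagnosing a hung queue.
 */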
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

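/* Each entry is { name, mode, show, write } and/or .seq_ops. */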
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

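/*
 * BLK_MQ_F_* are bit masks rather than bit numbers, hence the ilog2() in
 * the initializer index below.
 */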
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(TAG_RR),
	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != ilog2(BLK_MQ_F_MAX));

	blk_flags_show(m, hctx->flags, hctx_flag_name,
		       ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(POLLED),
	CMD_FLAG_NAME(ALLOC_CACHE),
	CMD_FLAG_NAME(SWAP),
	CMD_FLAG_NAME(DRV),
	CMD_FLAG_NAME(FS_PRIVATE),
	CMD_FLAG_NAME(ATOMIC),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [__RQF_##name] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_PLUGGING),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

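/*
 * Dump one request as a single line:
 * <addr> {.op=..., .cmd_flags=..., .rq_flags=..., .state=..., .tag=...,
 * .internal_tag=...}, plus any driver-specific detail via ->show_rq().
 */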
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

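/*
 * Walk all in-flight requests in the tag set and print those mapped to
 * this hctx. Holding elevator_lock serializes against elevator switches;
 * taking it interruptibly lets the reader bail out on a signal instead of
 * sleeping uninterruptibly.
 */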
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };
	int res;

	res = mutex_lock_interruptible(&hctx->queue->elevator_lock);
	if (res)
		return res;
	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);
	mutex_unlock(&hctx->queue->elevator_lock);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

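/*
 * The four show functions below hold elevator_lock while dumping tags:
 * hctx->tags and hctx->sched_tags can be freed or replaced while an
 * elevator switch or a queue resize is in flight.
 */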
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

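/*
 * CTX_RQ_SEQ_OPS() stamps out identical seq_file start/next/stop/show
 * operations for each per-CPU software-queue request list (default, read,
 * poll), guarded by ctx->lock.
 */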
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = debugfs_get_aux(m->file);

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = debugfs_get_aux(file);

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

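/*
 * Attributes with .seq_ops use the full seq_file iterator interface;
 * plain .show attributes go through single_open(). Release must match
 * whichever path open took.
 */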
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = debugfs_get_aux(file);
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	for (; attr->name; attr++)
		debugfs_create_file_aux(attr->name, attr->mode, parent,
					(void *)attr, data, &blk_mq_debugfs_fops);
}

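/*
 * Create the queue-level attributes, a directory for every hardware
 * context that doesn't have one yet, and a directory for each registered
 * rq_qos policy.
 */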
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}