/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

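/*
 * Print one blk_rq_stat bucket as "samples=N, mean=..., min=..., max=..."
 * (or just "samples=0" when the bucket is empty), matching the format
 * strings below.
 */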
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

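/*
 * One line per size bucket and direction: bucket N covers requests of
 * 512 << N bytes, with reads in the even slots of q->poll_stat[] and
 * writes in the odd slots (see the 2*bucket / 2*bucket+1 indexing below).
 */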
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ",  1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

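/*
 * seq_file iterator for q->requeue_list. The requeue lock is held from
 * ->start() to ->stop(), so requests cannot be removed from the list
 * while it is being printed.
 */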
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

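/*
 * Print a '|'-separated list of the names of the bits set in @flags;
 * bits without an entry in @flag_name fall back to the raw bit number
 * (so an unnamed bit 31 would print as "31").
 */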
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

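/*
 * Writing to "state" triggers queue operations by name. A usage sketch,
 * assuming debugfs is mounted at /sys/kernel/debug and the disk is named
 * "sda" (both are assumptions, not requirements):
 *
 *	echo run > /sys/kernel/debug/block/sda/state	(run hw queues)
 *	echo start > /sys/kernel/debug/block/sda/state	(restart stopped queues)
 *	echo kick > /sys/kernel/debug/block/sda/state	(kick the requeue list)
 */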
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

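/*
 * Per-queue debugfs attributes, created in the queue's debugfs
 * directory. Entries that only set .seq_ops are read-only; entries with
 * a write callback (e.g. "state") also accept writes.
 */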
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

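/*
 * Print one request as "<addr> {.op=..., .cmd_flags=..., .rq_flags=...,
 * .state=..., .tag=..., .internal_tag=...}", followed by any
 * driver-specific output from the optional ->show_rq() callback.
 */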
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    blk_mq_rq_state(rq) != MQ_RQ_IDLE)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

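/*
 * Dump a tag set: the counters first, then the sbitmap backing the
 * normal tags and, if any tags are reserved, the reserved-tag bitmap.
 */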
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

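/*
 * The tag dumping helpers below take q->sysfs_lock so that the tag sets
 * cannot be freed underneath them (for example by a concurrent elevator
 * switch); mutex_lock_interruptible() lets readers bail out on a signal.
 */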
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start	= ctx_rq_list_start,
	.next	= ctx_rq_list_next,
	.stop	= ctx_rq_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

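/*
 * Glue between debugfs and the attribute tables: each file's
 * inode->i_private points at its blk_mq_debugfs_attr, while the parent
 * directory's inode->i_private (set by debugfs_create_files()) holds
 * the queue, hctx or ctx that the callbacks operate on.
 */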
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr'
	 * is the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

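/*
 * Create one file per attribute under @parent and stash @data in the
 * directory inode for the open/show/write glue above. Returns false as
 * soon as a file cannot be created.
 */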
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}

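/*
 * Register the top-level debugfs directory for a queue, named after the
 * disk, under the "block" debugfs root. The layout that results is the
 * queue attributes at the top, an optional sched/ directory for the
 * elevator, and hctx<N>/ subtrees (each with cpu<N>/ inside) created by
 * the helpers below.
 */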
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

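/* Create the "cpu<N>" directory and files for one software context. */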
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}