#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

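/*
 * The mq kobject and the per-CPU ctx kobjects are embedded in structures
 * whose memory is managed elsewhere (the request queue and its per-CPU
 * queue_ctx area), so their release callback has nothing to free.
 */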
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

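/*
 * Called when the last reference to a hardware context's kobject is
 * dropped: tear down the SRCU state used by blocking dispatch, then free
 * the flush queue, the software-queue bitmap, the CPU mask, the ctx
 * pointer array and finally the hctx itself.
 */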
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

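/*
 * Typed attribute wrappers: each sysfs attribute carries show/store
 * callbacks that take the software (ctx) or hardware (hctx) context
 * directly instead of a raw kobject.
 */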
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

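/*
 * The four sysfs_ops callbacks below follow the same pattern: recover the
 * entry and the ctx/hctx from the attribute and kobject via container_of,
 * then invoke the typed callback under q->sysfs_lock, refusing access
 * (-ENOENT) once the queue is dying.
 */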
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

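/*
 * Emit the CPUs mapped to this hardware context as a comma-separated
 * list.  One byte of the page is held back so the trailing newline always
 * fits; if the list itself would overflow, it is cut off after the last
 * entry that fit completely.
 */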
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

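/* The per-CPU ctx directories expose no attributes of their own. */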
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

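/*
 * kobject glue: the mq directory and the per-CPU ctx directories share
 * the ctx sysfs_ops and an empty release; the hardware context
 * directories get their own ops, attributes and teardown.
 */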
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};

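/*
 * Remove a hardware context's directory and the cpuN subdirectories of
 * every software queue mapped to it.  A hctx with no mapped ctxs was
 * never registered, so there is nothing to remove.
 */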
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

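/*
 * Add the hardware context's directory under the queue's mq kobject and
 * one cpuN subdirectory per mapped software queue; on failure, unwind the
 * ctx kobjects added so far before dropping the hctx directory itself.
 */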
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

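/*
 * Tear down the whole mq sysfs hierarchy for a queue and drop the device
 * kobject reference taken at registration time.
 */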
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

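/*
 * Drop the initial references taken by blk_mq_sysfs_init(); the release
 * callbacks run once all other holders are gone.
 */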
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}

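/*
 * Initialize, but do not yet add, the mq kobject and one ctx kobject per
 * possible CPU; they only become visible on registration.
 */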
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

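/*
 * Build the mq directory under the device kobject and register every
 * hardware context beneath it.  The caller must hold sysfs_dir_lock; on
 * failure, any contexts registered so far are unwound and the reference
 * on the device kobject is dropped again.
 */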
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

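/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() tear down and rebuild
 * only the per-hctx directories, e.g. while the hardware queue count is
 * being updated; both are no-ops until the initial device registration
 * has completed (mq_sysfs_init_done).
 */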
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}