1 /*
2  * Common Block IO controller cgroup interface
3  *
4  * Based on ideas and code from CFQ, CFS and BFQ:
5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6  *
7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8  *		      Paolo Valente <paolo.valente@unimore.it>
9  *
10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11  * 	              Nauman Rafique <nauman@google.com>
12  *
13  * For policy-specific per-blkcg data:
14  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
15  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
16  */
17 #include <linux/ioprio.h>
18 #include <linux/kdev_t.h>
19 #include <linux/module.h>
20 #include <linux/sched/signal.h>
21 #include <linux/err.h>
22 #include <linux/blkdev.h>
23 #include <linux/backing-dev.h>
24 #include <linux/slab.h>
25 #include <linux/genhd.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/ctype.h>
29 #include <linux/blk-cgroup.h>
30 #include <linux/tracehook.h>
31 #include "blk.h"
32 
33 #define MAX_KEY_LEN 100
34 
35 /*
36  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
37  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
38  * policy [un]register operations including cgroup file additions /
39  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
40  * allows grabbing it from cgroup callbacks.
41  */
42 static DEFINE_MUTEX(blkcg_pol_register_mutex);
43 static DEFINE_MUTEX(blkcg_pol_mutex);
44 
45 struct blkcg blkcg_root;
46 EXPORT_SYMBOL_GPL(blkcg_root);
47 
48 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
49 
50 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
51 
52 static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
53 
54 static bool blkcg_debug_stats = false;
55 
56 static bool blkcg_policy_enabled(struct request_queue *q,
57 				 const struct blkcg_policy *pol)
58 {
59 	return pol && test_bit(pol->plid, q->blkcg_pols);
60 }
61 
62 /**
63  * blkg_free - free a blkg
64  * @blkg: blkg to free
65  *
66  * Free @blkg which may be partially allocated.
67  */
68 static void blkg_free(struct blkcg_gq *blkg)
69 {
70 	int i;
71 
72 	if (!blkg)
73 		return;
74 
75 	for (i = 0; i < BLKCG_MAX_POLS; i++)
76 		if (blkg->pd[i])
77 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
78 
79 	if (blkg->blkcg != &blkcg_root)
80 		blk_exit_rl(blkg->q, &blkg->rl);
81 
82 	blkg_rwstat_exit(&blkg->stat_ios);
83 	blkg_rwstat_exit(&blkg->stat_bytes);
84 	kfree(blkg);
85 }
86 
87 /**
88  * blkg_alloc - allocate a blkg
89  * @blkcg: block cgroup the new blkg is associated with
90  * @q: request_queue the new blkg is associated with
91  * @gfp_mask: allocation mask to use
92  *
93  * Allocate a new blkg associating @blkcg and @q.
94  */
95 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
96 				   gfp_t gfp_mask)
97 {
98 	struct blkcg_gq *blkg;
99 	int i;
100 
101 	/* alloc and init base part */
102 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
103 	if (!blkg)
104 		return NULL;
105 
106 	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
107 	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
108 		goto err_free;
109 
110 	blkg->q = q;
111 	INIT_LIST_HEAD(&blkg->q_node);
112 	blkg->blkcg = blkcg;
113 	atomic_set(&blkg->refcnt, 1);
114 
115 	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
116 	if (blkcg != &blkcg_root) {
117 		if (blk_init_rl(&blkg->rl, q, gfp_mask))
118 			goto err_free;
119 		blkg->rl.blkg = blkg;
120 	}
121 
122 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
123 		struct blkcg_policy *pol = blkcg_policy[i];
124 		struct blkg_policy_data *pd;
125 
126 		if (!blkcg_policy_enabled(q, pol))
127 			continue;
128 
129 		/* alloc per-policy data and attach it to blkg */
130 		pd = pol->pd_alloc_fn(gfp_mask, q->node);
131 		if (!pd)
132 			goto err_free;
133 
134 		blkg->pd[i] = pd;
135 		pd->blkg = blkg;
136 		pd->plid = i;
137 	}
138 
139 	return blkg;
140 
141 err_free:
142 	blkg_free(blkg);
143 	return NULL;
144 }
145 
146 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
147 				      struct request_queue *q, bool update_hint)
148 {
149 	struct blkcg_gq *blkg;
150 
151 	/*
152 	 * Hint didn't match.  Look up from the radix tree.  Note that the
153 	 * hint can only be updated under queue_lock as otherwise @blkg
154 	 * could have already been removed from blkg_tree.  The caller is
155 	 * responsible for grabbing queue_lock if @update_hint.
156 	 */
157 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
158 	if (blkg && blkg->q == q) {
159 		if (update_hint) {
160 			lockdep_assert_held(q->queue_lock);
161 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
162 		}
163 		return blkg;
164 	}
165 
166 	return NULL;
167 }
168 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
169 
170 /*
171  * If @new_blkg is %NULL, this function tries to allocate a new one as
172  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
173  */
174 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
175 				    struct request_queue *q,
176 				    struct blkcg_gq *new_blkg)
177 {
178 	struct blkcg_gq *blkg;
179 	struct bdi_writeback_congested *wb_congested;
180 	int i, ret;
181 
182 	WARN_ON_ONCE(!rcu_read_lock_held());
183 	lockdep_assert_held(q->queue_lock);
184 
185 	/* blkg holds a reference to blkcg */
186 	if (!css_tryget_online(&blkcg->css)) {
187 		ret = -ENODEV;
188 		goto err_free_blkg;
189 	}
190 
191 	wb_congested = wb_congested_get_create(q->backing_dev_info,
192 					       blkcg->css.id,
193 					       GFP_NOWAIT | __GFP_NOWARN);
194 	if (!wb_congested) {
195 		ret = -ENOMEM;
196 		goto err_put_css;
197 	}
198 
199 	/* allocate */
200 	if (!new_blkg) {
201 		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
202 		if (unlikely(!new_blkg)) {
203 			ret = -ENOMEM;
204 			goto err_put_congested;
205 		}
206 	}
207 	blkg = new_blkg;
208 	blkg->wb_congested = wb_congested;
209 
210 	/* link parent */
211 	if (blkcg_parent(blkcg)) {
212 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
213 		if (WARN_ON_ONCE(!blkg->parent)) {
214 			ret = -ENODEV;
215 			goto err_put_congested;
216 		}
217 		blkg_get(blkg->parent);
218 	}
219 
220 	/* invoke per-policy init */
221 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
222 		struct blkcg_policy *pol = blkcg_policy[i];
223 
224 		if (blkg->pd[i] && pol->pd_init_fn)
225 			pol->pd_init_fn(blkg->pd[i]);
226 	}
227 
228 	/* insert */
229 	spin_lock(&blkcg->lock);
230 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
231 	if (likely(!ret)) {
232 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
233 		list_add(&blkg->q_node, &q->blkg_list);
234 
235 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
236 			struct blkcg_policy *pol = blkcg_policy[i];
237 
238 			if (blkg->pd[i] && pol->pd_online_fn)
239 				pol->pd_online_fn(blkg->pd[i]);
240 		}
241 	}
242 	blkg->online = true;
243 	spin_unlock(&blkcg->lock);
244 
245 	if (!ret)
246 		return blkg;
247 
248 	/* @blkg failed to be fully initialized, use the usual release path */
249 	blkg_put(blkg);
250 	return ERR_PTR(ret);
251 
252 err_put_congested:
253 	wb_congested_put(wb_congested);
254 err_put_css:
255 	css_put(&blkcg->css);
256 err_free_blkg:
257 	blkg_free(new_blkg);
258 	return ERR_PTR(ret);
259 }
260 
261 /**
262  * blkg_lookup_create - lookup blkg, try to create one if not there
263  * @blkcg: blkcg of interest
264  * @q: request_queue of interest
265  *
266  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
267  * create one.  blkg creation is performed recursively from blkcg_root such
268  * that all non-root blkg's have access to the parent blkg.  This function
269  * should be called under RCU read lock and @q->queue_lock.
270  *
271  * Returns pointer to the looked up or created blkg on success, ERR_PTR()
272  * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
273  * dead and bypassing, returns ERR_PTR(-EBUSY).
274  */
275 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
276 				    struct request_queue *q)
277 {
278 	struct blkcg_gq *blkg;
279 
280 	WARN_ON_ONCE(!rcu_read_lock_held());
281 	lockdep_assert_held(q->queue_lock);
282 
283 	/*
284 	 * This could be the first entry point of blkcg implementation and
285 	 * we shouldn't allow anything to go through for a bypassing queue.
286 	 */
287 	if (unlikely(blk_queue_bypass(q)))
288 		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
289 
290 	blkg = __blkg_lookup(blkcg, q, true);
291 	if (blkg)
292 		return blkg;
293 
294 	/*
295 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
296 	 * non-root blkgs have access to their parents.
297 	 */
298 	while (true) {
299 		struct blkcg *pos = blkcg;
300 		struct blkcg *parent = blkcg_parent(blkcg);
301 
302 		while (parent && !__blkg_lookup(parent, q, false)) {
303 			pos = parent;
304 			parent = blkcg_parent(parent);
305 		}
306 
307 		blkg = blkg_create(pos, q, NULL);
308 		if (pos == blkcg || IS_ERR(blkg))
309 			return blkg;
310 	}
311 }
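/*
 * Illustrative sketch (not an in-tree caller) of a lookup-or-create of the
 * current bio's blkg under the locks required above.  The bio and queue
 * variables are assumed to exist in the caller.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	if (!IS_ERR(blkg)) {
 *		// charge stats, apply limits, etc. via blkg->pd[...]
 *	}
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */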
312 
313 static void blkg_destroy(struct blkcg_gq *blkg)
314 {
315 	struct blkcg *blkcg = blkg->blkcg;
316 	struct blkcg_gq *parent = blkg->parent;
317 	int i;
318 
319 	lockdep_assert_held(blkg->q->queue_lock);
320 	lockdep_assert_held(&blkcg->lock);
321 
322 	/* Something wrong if we are trying to remove the same group twice */
323 	WARN_ON_ONCE(list_empty(&blkg->q_node));
324 	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
325 
326 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
327 		struct blkcg_policy *pol = blkcg_policy[i];
328 
329 		if (blkg->pd[i] && pol->pd_offline_fn)
330 			pol->pd_offline_fn(blkg->pd[i]);
331 	}
332 
333 	if (parent) {
334 		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
335 		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
336 	}
337 
338 	blkg->online = false;
339 
340 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
341 	list_del_init(&blkg->q_node);
342 	hlist_del_init_rcu(&blkg->blkcg_node);
343 
344 	/*
345 	 * Both setting lookup hint to and clearing it from @blkg are done
346 	 * under queue_lock.  If it's not pointing to @blkg now, it never
347 	 * will.  Hint assignment itself can race safely.
348 	 */
349 	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
350 		rcu_assign_pointer(blkcg->blkg_hint, NULL);
351 
352 	/*
353 	 * Put the reference taken at the time of creation so that when all
354 	 * queues are gone, group can be destroyed.
355 	 */
356 	blkg_put(blkg);
357 }
358 
359 /**
360  * blkg_destroy_all - destroy all blkgs associated with a request_queue
361  * @q: request_queue of interest
362  *
363  * Destroy all blkgs associated with @q.
364  */
365 static void blkg_destroy_all(struct request_queue *q)
366 {
367 	struct blkcg_gq *blkg, *n;
368 
369 	lockdep_assert_held(q->queue_lock);
370 
371 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
372 		struct blkcg *blkcg = blkg->blkcg;
373 
374 		spin_lock(&blkcg->lock);
375 		blkg_destroy(blkg);
376 		spin_unlock(&blkcg->lock);
377 	}
378 
379 	q->root_blkg = NULL;
380 	q->root_rl.blkg = NULL;
381 }
382 
383 /*
384  * A group is RCU protected, but having an rcu lock does not mean that one
385  * can access all the fields of blkg and assume these are valid.  For
386  * example, don't try to follow throtl_data and request queue links.
387  *
388  * Having a reference to blkg under an rcu allows accesses to only values
389  * local to groups like group stats and group rate limits.
390  */
391 void __blkg_release_rcu(struct rcu_head *rcu_head)
392 {
393 	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
394 
395 	/* release the blkcg and parent blkg refs this blkg has been holding */
396 	css_put(&blkg->blkcg->css);
397 	if (blkg->parent)
398 		blkg_put(blkg->parent);
399 
400 	wb_congested_put(blkg->wb_congested);
401 
402 	blkg_free(blkg);
403 }
404 EXPORT_SYMBOL_GPL(__blkg_release_rcu);
405 
406 /*
407  * The "next" iterator used by blk_queue_for_each_rl().  It's a bit tricky
408  * because the root blkg uses @q->root_rl instead of its own rl.
409  */
410 struct request_list *__blk_queue_next_rl(struct request_list *rl,
411 					 struct request_queue *q)
412 {
413 	struct list_head *ent;
414 	struct blkcg_gq *blkg;
415 
416 	/*
417 	 * Determine the current blkg list_head.  The first entry is
418 	 * root_rl, which is not on @q->blkg_list and maps to the list head.
419 	 */
420 	if (rl == &q->root_rl) {
421 		ent = &q->blkg_list;
422 		/* There are no more block groups, hence no request lists */
423 		if (list_empty(ent))
424 			return NULL;
425 	} else {
426 		blkg = container_of(rl, struct blkcg_gq, rl);
427 		ent = &blkg->q_node;
428 	}
429 
430 	/* walk to the next list_head, skip root blkcg */
431 	ent = ent->next;
432 	if (ent == &q->root_blkg->q_node)
433 		ent = ent->next;
434 	if (ent == &q->blkg_list)
435 		return NULL;
436 
437 	blkg = container_of(ent, struct blkcg_gq, q_node);
438 	return &blkg->rl;
439 }
440 
441 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
442 			     struct cftype *cftype, u64 val)
443 {
444 	struct blkcg *blkcg = css_to_blkcg(css);
445 	struct blkcg_gq *blkg;
446 	int i;
447 
448 	mutex_lock(&blkcg_pol_mutex);
449 	spin_lock_irq(&blkcg->lock);
450 
451 	/*
452 	 * Note that stat reset is racy - it doesn't synchronize against
453 	 * stat updates.  This is a debug feature which shouldn't exist
454 	 * anyway.  If you get hit by a race, retry.
455 	 */
456 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
457 		blkg_rwstat_reset(&blkg->stat_bytes);
458 		blkg_rwstat_reset(&blkg->stat_ios);
459 
460 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
461 			struct blkcg_policy *pol = blkcg_policy[i];
462 
463 			if (blkg->pd[i] && pol->pd_reset_stats_fn)
464 				pol->pd_reset_stats_fn(blkg->pd[i]);
465 		}
466 	}
467 
468 	spin_unlock_irq(&blkcg->lock);
469 	mutex_unlock(&blkcg_pol_mutex);
470 	return 0;
471 }
472 
473 const char *blkg_dev_name(struct blkcg_gq *blkg)
474 {
475 	/* some drivers (floppy) instantiate a queue w/o disk registered */
476 	if (blkg->q->backing_dev_info->dev)
477 		return bdi_dev_name(blkg->q->backing_dev_info);
478 	return NULL;
479 }
480 EXPORT_SYMBOL_GPL(blkg_dev_name);
481 
482 /**
483  * blkcg_print_blkgs - helper for printing per-blkg data
484  * @sf: seq_file to print to
485  * @blkcg: blkcg of interest
486  * @prfill: fill function to print out a blkg
487  * @pol: policy in question
488  * @data: data to be passed to @prfill
489  * @show_total: to print out sum of prfill return values or not
490  *
491  * This function invokes @prfill on each blkg of @blkcg if pd for the
492  * policy specified by @pol exists.  @prfill is invoked with @sf, the
493  * policy data and @data and the matching queue lock held.  If @show_total
494  * is %true, the sum of the return values from @prfill is printed with
495  * "Total" label at the end.
496  *
497  * This is to be used to construct print functions for
498  * cftype->read_seq_string method.
499  */
500 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
501 		       u64 (*prfill)(struct seq_file *,
502 				     struct blkg_policy_data *, int),
503 		       const struct blkcg_policy *pol, int data,
504 		       bool show_total)
505 {
506 	struct blkcg_gq *blkg;
507 	u64 total = 0;
508 
509 	rcu_read_lock();
510 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
511 		spin_lock_irq(blkg->q->queue_lock);
512 		if (blkcg_policy_enabled(blkg->q, pol))
513 			total += prfill(sf, blkg->pd[pol->plid], data);
514 		spin_unlock_irq(blkg->q->queue_lock);
515 	}
516 	rcu_read_unlock();
517 
518 	if (show_total)
519 		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
520 }
521 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
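/*
 * Rough usage sketch for blkcg_print_blkgs(), assuming a hypothetical policy
 * "blkcg_policy_example" and prfill helper; a real policy would normally pull
 * the value out of its own blkg_policy_data:
 *
 *	static u64 example_prfill(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		struct example_pd *epd = pd_to_example(pd);	// hypothetical
 *
 *		return __blkg_prfill_u64(sf, pd, epd->some_counter);
 *	}
 *
 *	static int example_seq_show(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill, &blkcg_policy_example, 0, true);
 *		return 0;
 *	}
 */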
522 
523 /**
524  * __blkg_prfill_u64 - prfill helper for a single u64 value
525  * @sf: seq_file to print to
526  * @pd: policy private data of interest
527  * @v: value to print
528  *
529  * Print @v to @sf for the device associated with @pd.
530  */
531 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
532 {
533 	const char *dname = blkg_dev_name(pd->blkg);
534 
535 	if (!dname)
536 		return 0;
537 
538 	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
539 	return v;
540 }
541 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
542 
543 /**
544  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
545  * @sf: seq_file to print to
546  * @pd: policy private data of interest
547  * @rwstat: rwstat to print
548  *
549  * Print @rwstat to @sf for the device associated with @pd.
550  */
551 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
552 			 const struct blkg_rwstat *rwstat)
553 {
554 	static const char *rwstr[] = {
555 		[BLKG_RWSTAT_READ]	= "Read",
556 		[BLKG_RWSTAT_WRITE]	= "Write",
557 		[BLKG_RWSTAT_SYNC]	= "Sync",
558 		[BLKG_RWSTAT_ASYNC]	= "Async",
559 		[BLKG_RWSTAT_DISCARD]	= "Discard",
560 	};
561 	const char *dname = blkg_dev_name(pd->blkg);
562 	u64 v;
563 	int i;
564 
565 	if (!dname)
566 		return 0;
567 
568 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
569 		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
570 			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
571 
572 	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
573 		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
574 		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
575 	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
576 	return v;
577 }
578 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
579 
580 /**
581  * blkg_prfill_stat - prfill callback for blkg_stat
582  * @sf: seq_file to print to
583  * @pd: policy private data of interest
584  * @off: offset to the blkg_stat in @pd
585  *
586  * prfill callback for printing a blkg_stat.
587  */
588 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
589 {
590 	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
591 }
592 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
593 
594 /**
595  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
596  * @sf: seq_file to print to
597  * @pd: policy private data of interest
598  * @off: offset to the blkg_rwstat in @pd
599  *
600  * prfill callback for printing a blkg_rwstat.
601  */
602 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
603 		       int off)
604 {
605 	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
606 
607 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
608 }
609 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
610 
611 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
612 				    struct blkg_policy_data *pd, int off)
613 {
614 	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
615 
616 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
617 }
618 
619 /**
620  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
621  * @sf: seq_file to print to
622  * @v: unused
623  *
624  * To be used as cftype->seq_show to print blkg->stat_bytes.
625  * cftype->private must be set to the blkcg_policy.
626  */
627 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
628 {
629 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
630 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
631 			  offsetof(struct blkcg_gq, stat_bytes), true);
632 	return 0;
633 }
634 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
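/*
 * blkg_print_stat_bytes() and blkg_print_stat_ios() below are meant to be
 * wired straight into a policy's cftype array.  A sketch, with the policy
 * and file names hypothetical:
 *
 *	{
 *		.name		= "example.io_service_bytes",
 *		.private	= (unsigned long)&blkcg_policy_example,
 *		.seq_show	= blkg_print_stat_bytes,
 *	},
 */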
635 
636 /**
637  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
638  * @sf: seq_file to print to
639  * @v: unused
640  *
641  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
642  * must be set to the blkcg_policy.
643  */
644 int blkg_print_stat_ios(struct seq_file *sf, void *v)
645 {
646 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
647 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
648 			  offsetof(struct blkcg_gq, stat_ios), true);
649 	return 0;
650 }
651 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
652 
653 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
654 					      struct blkg_policy_data *pd,
655 					      int off)
656 {
657 	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
658 							      NULL, off);
659 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
660 }
661 
662 /**
663  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
664  * @sf: seq_file to print to
665  * @v: unused
666  */
667 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
668 {
669 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
670 			  blkg_prfill_rwstat_field_recursive,
671 			  (void *)seq_cft(sf)->private,
672 			  offsetof(struct blkcg_gq, stat_bytes), true);
673 	return 0;
674 }
675 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
676 
677 /**
678  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
679  * @sf: seq_file to print to
680  * @v: unused
681  */
682 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
683 {
684 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
685 			  blkg_prfill_rwstat_field_recursive,
686 			  (void *)seq_cft(sf)->private,
687 			  offsetof(struct blkcg_gq, stat_ios), true);
688 	return 0;
689 }
690 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
691 
692 /**
693  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
694  * @blkg: blkg of interest
695  * @pol: blkcg_policy which contains the blkg_stat
696  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
697  *
698  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
699  * online descendants and their aux counts.  The caller must be holding the
700  * queue lock for online tests.
701  *
702  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
703  * at @off bytes into @blkg's blkg_policy_data of the policy.
704  */
705 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
706 			    struct blkcg_policy *pol, int off)
707 {
708 	struct blkcg_gq *pos_blkg;
709 	struct cgroup_subsys_state *pos_css;
710 	u64 sum = 0;
711 
712 	lockdep_assert_held(blkg->q->queue_lock);
713 
714 	rcu_read_lock();
715 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
716 		struct blkg_stat *stat;
717 
718 		if (!pos_blkg->online)
719 			continue;
720 
721 		if (pol)
722 			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
723 		else
724 			stat = (void *)blkg + off;
725 
726 		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
727 	}
728 	rcu_read_unlock();
729 
730 	return sum;
731 }
732 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
733 
734 /**
735  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
736  * @blkg: blkg of interest
737  * @pol: blkcg_policy which contains the blkg_rwstat
738  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
739  *
740  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
741  * online descendants and their aux counts.  The caller must be holding the
742  * queue lock for online tests.
743  *
744  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
745  * is at @off bytes into @blkg's blkg_policy_data of the policy.
746  */
747 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
748 					     struct blkcg_policy *pol, int off)
749 {
750 	struct blkcg_gq *pos_blkg;
751 	struct cgroup_subsys_state *pos_css;
752 	struct blkg_rwstat sum = { };
753 	int i;
754 
755 	lockdep_assert_held(blkg->q->queue_lock);
756 
757 	rcu_read_lock();
758 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
759 		struct blkg_rwstat *rwstat;
760 
761 		if (!pos_blkg->online)
762 			continue;
763 
764 		if (pol)
765 			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
766 		else
767 			rwstat = (void *)pos_blkg + off;
768 
769 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
770 			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
771 				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
772 				&sum.aux_cnt[i]);
773 	}
774 	rcu_read_unlock();
775 
776 	return sum;
777 }
778 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
779 
780 /* Performs queue bypass and policy enabled checks then looks up blkg. */
781 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
782 					  const struct blkcg_policy *pol,
783 					  struct request_queue *q)
784 {
785 	WARN_ON_ONCE(!rcu_read_lock_held());
786 	lockdep_assert_held(q->queue_lock);
787 
788 	if (!blkcg_policy_enabled(q, pol))
789 		return ERR_PTR(-EOPNOTSUPP);
790 
791 	/*
792 	 * This could be the first entry point of blkcg implementation and
793 	 * we shouldn't allow anything to go through for a bypassing queue.
794 	 */
795 	if (unlikely(blk_queue_bypass(q)))
796 		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
797 
798 	return __blkg_lookup(blkcg, q, true /* update_hint */);
799 }
800 
801 /**
802  * blkg_conf_prep - parse and prepare for per-blkg config update
803  * @blkcg: target block cgroup
804  * @pol: target policy
805  * @input: input string
806  * @ctx: blkg_conf_ctx to be filled
807  *
808  * Parse per-blkg config update from @input and initialize @ctx with the
809  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
810  * part of @input following MAJ:MIN.  This function returns with RCU read
811  * lock and queue lock held and must be paired with blkg_conf_finish().
812  */
813 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
814 		   char *input, struct blkg_conf_ctx *ctx)
815 	__acquires(rcu) __acquires(disk->queue->queue_lock)
816 {
817 	struct gendisk *disk;
818 	struct request_queue *q;
819 	struct blkcg_gq *blkg;
820 	unsigned int major, minor;
821 	int key_len, part, ret;
822 	char *body;
823 
824 	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
825 		return -EINVAL;
826 
827 	body = input + key_len;
828 	if (!isspace(*body))
829 		return -EINVAL;
830 	body = skip_spaces(body);
831 
832 	disk = get_gendisk(MKDEV(major, minor), &part);
833 	if (!disk)
834 		return -ENODEV;
835 	if (part) {
836 		ret = -ENODEV;
837 		goto fail;
838 	}
839 
840 	q = disk->queue;
841 
842 	rcu_read_lock();
843 	spin_lock_irq(q->queue_lock);
844 
845 	blkg = blkg_lookup_check(blkcg, pol, q);
846 	if (IS_ERR(blkg)) {
847 		ret = PTR_ERR(blkg);
848 		goto fail_unlock;
849 	}
850 
851 	if (blkg)
852 		goto success;
853 
854 	/*
855 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
856 	 * non-root blkgs have access to their parents.
857 	 */
858 	while (true) {
859 		struct blkcg *pos = blkcg;
860 		struct blkcg *parent;
861 		struct blkcg_gq *new_blkg;
862 
863 		parent = blkcg_parent(blkcg);
864 		while (parent && !__blkg_lookup(parent, q, false)) {
865 			pos = parent;
866 			parent = blkcg_parent(parent);
867 		}
868 
869 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
870 		spin_unlock_irq(q->queue_lock);
871 		rcu_read_unlock();
872 
873 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
874 		if (unlikely(!new_blkg)) {
875 			ret = -ENOMEM;
876 			goto fail;
877 		}
878 
879 		if (radix_tree_preload(GFP_KERNEL)) {
880 			blkg_free(new_blkg);
881 			ret = -ENOMEM;
882 			goto fail;
883 		}
884 
885 		rcu_read_lock();
886 		spin_lock_irq(q->queue_lock);
887 
888 		blkg = blkg_lookup_check(pos, pol, q);
889 		if (IS_ERR(blkg)) {
890 			ret = PTR_ERR(blkg);
891 			blkg_free(new_blkg);
892 			goto fail_preloaded;
893 		}
894 
895 		if (blkg) {
896 			blkg_free(new_blkg);
897 		} else {
898 			blkg = blkg_create(pos, q, new_blkg);
899 			if (unlikely(IS_ERR(blkg))) {
900 				ret = PTR_ERR(blkg);
901 				goto fail_preloaded;
902 			}
903 		}
904 
905 		radix_tree_preload_end();
906 
907 		if (pos == blkcg)
908 			goto success;
909 	}
910 success:
911 	ctx->disk = disk;
912 	ctx->blkg = blkg;
913 	ctx->body = body;
914 	return 0;
915 
916 fail_preloaded:
917 	radix_tree_preload_end();
918 fail_unlock:
919 	spin_unlock_irq(q->queue_lock);
920 	rcu_read_unlock();
921 fail:
922 	put_disk_and_module(disk);
923 	/*
924 	 * If queue was bypassing, we should retry.  Do so after a
925 	 * short msleep().  It isn't strictly necessary but queue
926 	 * can be bypassing for some time and it's always nice to
927 	 * avoid busy looping.
928 	 */
929 	if (ret == -EBUSY) {
930 		msleep(10);
931 		ret = restart_syscall();
932 	}
933 	return ret;
934 }
935 EXPORT_SYMBOL_GPL(blkg_conf_prep);
936 
937 /**
938  * blkg_conf_finish - finish up per-blkg config update
939  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
940  *
941  * Finish up after per-blkg config update.  This function must be paired
942  * with blkg_conf_prep().
943  */
944 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
945 	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
946 {
947 	spin_unlock_irq(ctx->disk->queue->queue_lock);
948 	rcu_read_unlock();
949 	put_disk_and_module(ctx->disk);
950 }
951 EXPORT_SYMBOL_GPL(blkg_conf_finish);
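/*
 * Minimal sketch of a cftype ->write handler built on the prep/finish pair
 * above; "blkcg_policy_example" and example_apply_config() are hypothetical:
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ctx.blkg is the target blkg, ctx.body the text after MAJ:MIN
 *		ret = example_apply_config(ctx.blkg, ctx.body);
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */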
952 
953 static int blkcg_print_stat(struct seq_file *sf, void *v)
954 {
955 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
956 	struct blkcg_gq *blkg;
957 
958 	rcu_read_lock();
959 
960 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
961 		const char *dname;
962 		char *buf;
963 		struct blkg_rwstat rwstat;
964 		u64 rbytes, wbytes, rios, wios, dbytes, dios;
965 		size_t size = seq_get_buf(sf, &buf), off = 0;
966 		int i;
967 		bool has_stats = false;
968 
969 		spin_lock_irq(blkg->q->queue_lock);
970 
971 		if (!blkg->online)
972 			goto skip;
973 
974 		dname = blkg_dev_name(blkg);
975 		if (!dname)
976 			goto skip;
977 
978 		/*
979 		 * Hooray string manipulation: scnprintf() returns the number of
980 		 * bytes written NOT including the trailing \0.  The next write
981 		 * should start at that \0, which is why @off is advanced by the
982 		 * returned count only.
983 		 */
984 		off += scnprintf(buf+off, size-off, "%s ", dname);
985 
986 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
987 					offsetof(struct blkcg_gq, stat_bytes));
988 		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
989 		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
990 		dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
991 
992 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
993 					offsetof(struct blkcg_gq, stat_ios));
994 		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
995 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
996 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
997 
998 		if (rbytes || wbytes || rios || wios) {
999 			has_stats = true;
1000 			off += scnprintf(buf+off, size-off,
1001 					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
1002 					 rbytes, wbytes, rios, wios,
1003 					 dbytes, dios);
1004 		}
1005 
1006 		if (!blkcg_debug_stats)
1007 			goto next;
1008 
1009 		if (atomic_read(&blkg->use_delay)) {
1010 			has_stats = true;
1011 			off += scnprintf(buf+off, size-off,
1012 					 " use_delay=%d delay_nsec=%llu",
1013 					 atomic_read(&blkg->use_delay),
1014 					(unsigned long long)atomic64_read(&blkg->delay_nsec));
1015 		}
1016 
1017 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
1018 			struct blkcg_policy *pol = blkcg_policy[i];
1019 			size_t written;
1020 
1021 			if (!blkg->pd[i] || !pol->pd_stat_fn)
1022 				continue;
1023 
1024 			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
1025 			if (written)
1026 				has_stats = true;
1027 			off += written;
1028 		}
1029 next:
1030 		if (has_stats) {
1031 			if (off < size - 1) {
1032 				off += scnprintf(buf+off, size-off, "\n");
1033 				seq_commit(sf, off);
1034 			} else {
1035 				seq_commit(sf, -1);
1036 			}
1037 		}
1038 	skip:
1039 		spin_unlock_irq(blkg->q->queue_lock);
1040 	}
1041 
1042 	rcu_read_unlock();
1043 	return 0;
1044 }
1045 
1046 static struct cftype blkcg_files[] = {
1047 	{
1048 		.name = "stat",
1049 		.flags = CFTYPE_NOT_ON_ROOT,
1050 		.seq_show = blkcg_print_stat,
1051 	},
1052 	{ }	/* terminate */
1053 };
1054 
1055 static struct cftype blkcg_legacy_files[] = {
1056 	{
1057 		.name = "reset_stats",
1058 		.write_u64 = blkcg_reset_stats,
1059 	},
1060 	{ }	/* terminate */
1061 };
1062 
1063 /*
1064  * blkcg destruction is a three-stage process.
1065  *
1066  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1067  *    which offlines writeback.  Here we tie the next stage of blkg destruction
1068  *    to the completion of writeback associated with the blkcg.  This lets us
1069  *    avoid punting potentially large amounts of outstanding writeback to root
1070  *    while maintaining any ongoing policies.  The next stage is triggered when
1071  *    the nr_cgwbs count goes to zero.
1072  *
1073  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1074  *    and handles the destruction of blkgs.  Here the css reference held by
1075  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1076  *    This work may occur in cgwb_release_workfn() on the cgwb_release
1077  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1078  *    punted to the root_blkg.
1079  *
1080  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1081  *    This finally frees the blkcg.
1082  */
1083 
1084 /**
1085  * blkcg_css_offline - cgroup css_offline callback
1086  * @css: css of interest
1087  *
1088  * This function is called when @css is about to go away.  Here the cgwbs are
1089  * offlined first and only once writeback associated with the blkcg has
1090  * finished do we start step 2 (see above).
1091  */
1092 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1093 {
1094 	struct blkcg *blkcg = css_to_blkcg(css);
1095 
1096 	/* this prevents anyone from attaching or migrating to this blkcg */
1097 	wb_blkcg_offline(blkcg);
1098 
1099 	/* put the base cgwb reference allowing step 2 to be triggered */
1100 	blkcg_cgwb_put(blkcg);
1101 }
1102 
1103 /**
1104  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1105  * @blkcg: blkcg of interest
1106  *
1107  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1108  * is nested inside q lock, this function performs reverse double lock dancing.
1109  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1110  * blkcg_css_free to eventually be called.
1111  *
1112  * This is the blkcg counterpart of ioc_release_fn().
1113  */
1114 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1115 {
1116 	spin_lock_irq(&blkcg->lock);
1117 
1118 	while (!hlist_empty(&blkcg->blkg_list)) {
1119 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1120 						struct blkcg_gq, blkcg_node);
1121 		struct request_queue *q = blkg->q;
1122 
1123 		if (spin_trylock(q->queue_lock)) {
1124 			blkg_destroy(blkg);
1125 			spin_unlock(q->queue_lock);
1126 		} else {
1127 			spin_unlock_irq(&blkcg->lock);
1128 			cpu_relax();
1129 			spin_lock_irq(&blkcg->lock);
1130 		}
1131 	}
1132 
1133 	spin_unlock_irq(&blkcg->lock);
1134 }
1135 
1136 static void blkcg_css_free(struct cgroup_subsys_state *css)
1137 {
1138 	struct blkcg *blkcg = css_to_blkcg(css);
1139 	int i;
1140 
1141 	mutex_lock(&blkcg_pol_mutex);
1142 
1143 	list_del(&blkcg->all_blkcgs_node);
1144 
1145 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1146 		if (blkcg->cpd[i])
1147 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1148 
1149 	mutex_unlock(&blkcg_pol_mutex);
1150 
1151 	kfree(blkcg);
1152 }
1153 
1154 static struct cgroup_subsys_state *
1155 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1156 {
1157 	struct blkcg *blkcg;
1158 	struct cgroup_subsys_state *ret;
1159 	int i;
1160 
1161 	mutex_lock(&blkcg_pol_mutex);
1162 
1163 	if (!parent_css) {
1164 		blkcg = &blkcg_root;
1165 	} else {
1166 		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1167 		if (!blkcg) {
1168 			ret = ERR_PTR(-ENOMEM);
1169 			goto unlock;
1170 		}
1171 	}
1172 
1173 	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1174 		struct blkcg_policy *pol = blkcg_policy[i];
1175 		struct blkcg_policy_data *cpd;
1176 
1177 		/*
1178 		 * If the policy hasn't been registered yet, skip it; its cpd
1179 		 * will be allocated when the policy registers.  Otherwise,
1180 		 * check if the policy requires any specific per-cgroup
1181 		 * data: if it does, allocate and initialize it.
1182 		 */
1183 		if (!pol || !pol->cpd_alloc_fn)
1184 			continue;
1185 
1186 		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1187 		if (!cpd) {
1188 			ret = ERR_PTR(-ENOMEM);
1189 			goto free_pd_blkcg;
1190 		}
1191 		blkcg->cpd[i] = cpd;
1192 		cpd->blkcg = blkcg;
1193 		cpd->plid = i;
1194 		if (pol->cpd_init_fn)
1195 			pol->cpd_init_fn(cpd);
1196 	}
1197 
1198 	spin_lock_init(&blkcg->lock);
1199 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1200 	INIT_HLIST_HEAD(&blkcg->blkg_list);
1201 #ifdef CONFIG_CGROUP_WRITEBACK
1202 	INIT_LIST_HEAD(&blkcg->cgwb_list);
1203 	refcount_set(&blkcg->cgwb_refcnt, 1);
1204 #endif
1205 	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1206 
1207 	mutex_unlock(&blkcg_pol_mutex);
1208 	return &blkcg->css;
1209 
1210 free_pd_blkcg:
1211 	for (i--; i >= 0; i--)
1212 		if (blkcg->cpd[i])
1213 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1214 
1215 	if (blkcg != &blkcg_root)
1216 		kfree(blkcg);
1217 unlock:
1218 	mutex_unlock(&blkcg_pol_mutex);
1219 	return ret;
1220 }
1221 
1222 /**
1223  * blkcg_init_queue - initialize blkcg part of request queue
1224  * @q: request_queue to initialize
1225  *
1226  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1227  * part of new request_queue @q.
1228  *
1229  * RETURNS:
1230  * 0 on success, -errno on failure.
1231  */
1232 int blkcg_init_queue(struct request_queue *q)
1233 {
1234 	struct blkcg_gq *new_blkg, *blkg;
1235 	bool preloaded;
1236 	int ret;
1237 
1238 	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1239 	if (!new_blkg)
1240 		return -ENOMEM;
1241 
1242 	preloaded = !radix_tree_preload(GFP_KERNEL);
1243 
1244 	/* Make sure the root blkg exists. */
1245 	rcu_read_lock();
1246 	spin_lock_irq(q->queue_lock);
1247 	blkg = blkg_create(&blkcg_root, q, new_blkg);
1248 	if (IS_ERR(blkg))
1249 		goto err_unlock;
1250 	q->root_blkg = blkg;
1251 	q->root_rl.blkg = blkg;
1252 	spin_unlock_irq(q->queue_lock);
1253 	rcu_read_unlock();
1254 
1255 	if (preloaded)
1256 		radix_tree_preload_end();
1257 
1258 	ret = blk_iolatency_init(q);
1259 	if (ret) {
1260 		spin_lock_irq(q->queue_lock);
1261 		blkg_destroy_all(q);
1262 		spin_unlock_irq(q->queue_lock);
1263 		return ret;
1264 	}
1265 
1266 	ret = blk_throtl_init(q);
1267 	if (ret) {
1268 		spin_lock_irq(q->queue_lock);
1269 		blkg_destroy_all(q);
1270 		spin_unlock_irq(q->queue_lock);
1271 	}
1272 	return ret;
1273 
1274 err_unlock:
1275 	spin_unlock_irq(q->queue_lock);
1276 	rcu_read_unlock();
1277 	if (preloaded)
1278 		radix_tree_preload_end();
1279 	return PTR_ERR(blkg);
1280 }
1281 
1282 /**
1283  * blkcg_drain_queue - drain blkcg part of request_queue
1284  * @q: request_queue to drain
1285  *
1286  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1287  */
1288 void blkcg_drain_queue(struct request_queue *q)
1289 {
1290 	lockdep_assert_held(q->queue_lock);
1291 
1292 	/*
1293 	 * @q could be exiting and already have destroyed all blkgs as
1294 	 * indicated by NULL root_blkg.  If so, don't confuse policies.
1295 	 */
1296 	if (!q->root_blkg)
1297 		return;
1298 
1299 	blk_throtl_drain(q);
1300 }
1301 
1302 /**
1303  * blkcg_exit_queue - exit and release blkcg part of request_queue
1304  * @q: request_queue being released
1305  *
1306  * Called from blk_release_queue().  Responsible for exiting blkcg part.
1307  */
1308 void blkcg_exit_queue(struct request_queue *q)
1309 {
1310 	spin_lock_irq(q->queue_lock);
1311 	blkg_destroy_all(q);
1312 	spin_unlock_irq(q->queue_lock);
1313 
1314 	blk_throtl_exit(q);
1315 }
1316 
1317 /*
1318  * We cannot support shared io contexts, as we have no means to support
1319  * two tasks with the same ioc in two different groups without major rework
1320  * of the main cic data structures.  For now we allow a task to change
1321  * its cgroup only if it's the only owner of its ioc.
1322  */
1323 static int blkcg_can_attach(struct cgroup_taskset *tset)
1324 {
1325 	struct task_struct *task;
1326 	struct cgroup_subsys_state *dst_css;
1327 	struct io_context *ioc;
1328 	int ret = 0;
1329 
1330 	/* task_lock() is needed to avoid races with exit_io_context() */
1331 	cgroup_taskset_for_each(task, dst_css, tset) {
1332 		task_lock(task);
1333 		ioc = task->io_context;
1334 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1335 			ret = -EINVAL;
1336 		task_unlock(task);
1337 		if (ret)
1338 			break;
1339 	}
1340 	return ret;
1341 }
1342 
1343 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1344 {
1345 	int i;
1346 
1347 	mutex_lock(&blkcg_pol_mutex);
1348 
1349 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1350 		struct blkcg_policy *pol = blkcg_policy[i];
1351 		struct blkcg *blkcg;
1352 
1353 		if (!pol || !pol->cpd_bind_fn)
1354 			continue;
1355 
1356 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1357 			if (blkcg->cpd[pol->plid])
1358 				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1359 	}
1360 	mutex_unlock(&blkcg_pol_mutex);
1361 }
1362 
1363 static void blkcg_exit(struct task_struct *tsk)
1364 {
1365 	if (tsk->throttle_queue)
1366 		blk_put_queue(tsk->throttle_queue);
1367 	tsk->throttle_queue = NULL;
1368 }
1369 
1370 struct cgroup_subsys io_cgrp_subsys = {
1371 	.css_alloc = blkcg_css_alloc,
1372 	.css_offline = blkcg_css_offline,
1373 	.css_free = blkcg_css_free,
1374 	.can_attach = blkcg_can_attach,
1375 	.bind = blkcg_bind,
1376 	.dfl_cftypes = blkcg_files,
1377 	.legacy_cftypes = blkcg_legacy_files,
1378 	.legacy_name = "blkio",
1379 	.exit = blkcg_exit,
1380 #ifdef CONFIG_MEMCG
1381 	/*
1382 	 * This ensures that, if available, memcg is automatically enabled
1383 	 * together on the default hierarchy so that the owner cgroup can
1384 	 * be retrieved from writeback pages.
1385 	 */
1386 	.depends_on = 1 << memory_cgrp_id,
1387 #endif
1388 };
1389 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1390 
1391 /**
1392  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1393  * @q: request_queue of interest
1394  * @pol: blkcg policy to activate
1395  *
1396  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1397  * bypass mode to populate its blkgs with policy_data for @pol.
1398  *
1399  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1400  * from IO path.  Update of each blkg is protected by both queue and blkcg
1401  * locks so that holding either lock and testing blkcg_policy_enabled() is
1402  * always enough for dereferencing policy data.
1403  *
1404  * The caller is responsible for synchronizing [de]activations and policy
1405  * [un]registrations.  Returns 0 on success, -errno on failure.
1406  */
1407 int blkcg_activate_policy(struct request_queue *q,
1408 			  const struct blkcg_policy *pol)
1409 {
1410 	struct blkg_policy_data *pd_prealloc = NULL;
1411 	struct blkcg_gq *blkg;
1412 	int ret;
1413 
1414 	if (blkcg_policy_enabled(q, pol))
1415 		return 0;
1416 
1417 	if (q->mq_ops)
1418 		blk_mq_freeze_queue(q);
1419 	else
1420 		blk_queue_bypass_start(q);
1421 pd_prealloc:
1422 	if (!pd_prealloc) {
1423 		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1424 		if (!pd_prealloc) {
1425 			ret = -ENOMEM;
1426 			goto out_bypass_end;
1427 		}
1428 	}
1429 
1430 	spin_lock_irq(q->queue_lock);
1431 
1432 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1433 		struct blkg_policy_data *pd;
1434 
1435 		if (blkg->pd[pol->plid])
1436 			continue;
1437 
1438 		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1439 		if (!pd)
1440 			swap(pd, pd_prealloc);
1441 		if (!pd) {
1442 			spin_unlock_irq(q->queue_lock);
1443 			goto pd_prealloc;
1444 		}
1445 
1446 		blkg->pd[pol->plid] = pd;
1447 		pd->blkg = blkg;
1448 		pd->plid = pol->plid;
1449 		if (pol->pd_init_fn)
1450 			pol->pd_init_fn(pd);
1451 	}
1452 
1453 	__set_bit(pol->plid, q->blkcg_pols);
1454 	ret = 0;
1455 
1456 	spin_unlock_irq(q->queue_lock);
1457 out_bypass_end:
1458 	if (q->mq_ops)
1459 		blk_mq_unfreeze_queue(q);
1460 	else
1461 		blk_queue_bypass_end(q);
1462 	if (pd_prealloc)
1463 		pol->pd_free_fn(pd_prealloc);
1464 	return ret;
1465 }
1466 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1467 
1468 /**
1469  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1470  * @q: request_queue of interest
1471  * @pol: blkcg policy to deactivate
1472  *
1473  * Deactivate @pol on @q.  Follows the same synchronization rules as
1474  * blkcg_activate_policy().
1475  */
1476 void blkcg_deactivate_policy(struct request_queue *q,
1477 			     const struct blkcg_policy *pol)
1478 {
1479 	struct blkcg_gq *blkg;
1480 
1481 	if (!blkcg_policy_enabled(q, pol))
1482 		return;
1483 
1484 	if (q->mq_ops)
1485 		blk_mq_freeze_queue(q);
1486 	else
1487 		blk_queue_bypass_start(q);
1488 
1489 	spin_lock_irq(q->queue_lock);
1490 
1491 	__clear_bit(pol->plid, q->blkcg_pols);
1492 
1493 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1494 		if (blkg->pd[pol->plid]) {
1495 			if (pol->pd_offline_fn)
1496 				pol->pd_offline_fn(blkg->pd[pol->plid]);
1497 			pol->pd_free_fn(blkg->pd[pol->plid]);
1498 			blkg->pd[pol->plid] = NULL;
1499 		}
1500 	}
1501 
1502 	spin_unlock_irq(q->queue_lock);
1503 
1504 	if (q->mq_ops)
1505 		blk_mq_unfreeze_queue(q);
1506 	else
1507 		blk_queue_bypass_end(q);
1508 }
1509 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
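/*
 * A sketch of the expected activate/deactivate pairing, assuming a policy
 * named "blkcg_policy_example" that is switched on when its user attaches
 * to a queue and off again on teardown:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_example);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_example);
 */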
1510 
1511 /**
1512  * blkcg_policy_register - register a blkcg policy
1513  * @pol: blkcg policy to register
1514  *
1515  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1516  * successful registration.  Returns 0 on success and -errno on failure.
1517  */
1518 int blkcg_policy_register(struct blkcg_policy *pol)
1519 {
1520 	struct blkcg *blkcg;
1521 	int i, ret;
1522 
1523 	mutex_lock(&blkcg_pol_register_mutex);
1524 	mutex_lock(&blkcg_pol_mutex);
1525 
1526 	/* find an empty slot */
1527 	ret = -ENOSPC;
1528 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1529 		if (!blkcg_policy[i])
1530 			break;
1531 	if (i >= BLKCG_MAX_POLS) {
1532 		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1533 		goto err_unlock;
1534 	}
1535 
1536 	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1537 	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1538 		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1539 		goto err_unlock;
1540 
1541 	/* register @pol */
1542 	pol->plid = i;
1543 	blkcg_policy[pol->plid] = pol;
1544 
1545 	/* allocate and install cpd's */
1546 	if (pol->cpd_alloc_fn) {
1547 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1548 			struct blkcg_policy_data *cpd;
1549 
1550 			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1551 			if (!cpd)
1552 				goto err_free_cpds;
1553 
1554 			blkcg->cpd[pol->plid] = cpd;
1555 			cpd->blkcg = blkcg;
1556 			cpd->plid = pol->plid;
1557 			pol->cpd_init_fn(cpd);
1558 		}
1559 	}
1560 
1561 	mutex_unlock(&blkcg_pol_mutex);
1562 
1563 	/* everything is in place, add intf files for the new policy */
1564 	if (pol->dfl_cftypes)
1565 		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1566 					       pol->dfl_cftypes));
1567 	if (pol->legacy_cftypes)
1568 		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1569 						  pol->legacy_cftypes));
1570 	mutex_unlock(&blkcg_pol_register_mutex);
1571 	return 0;
1572 
1573 err_free_cpds:
1574 	if (pol->cpd_free_fn) {
1575 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1576 			if (blkcg->cpd[pol->plid]) {
1577 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1578 				blkcg->cpd[pol->plid] = NULL;
1579 			}
1580 		}
1581 	}
1582 	blkcg_policy[pol->plid] = NULL;
1583 err_unlock:
1584 	mutex_unlock(&blkcg_pol_mutex);
1585 	mutex_unlock(&blkcg_pol_register_mutex);
1586 	return ret;
1587 }
1588 EXPORT_SYMBOL_GPL(blkcg_policy_register);
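/*
 * Hypothetical minimal policy definition and registration, to illustrate
 * which callbacks blkcg_policy_register() expects to come in pairs; none of
 * the example_* symbols exist in the tree:
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *		.pd_init_fn	= example_pd_init,
 *		.legacy_cftypes	= example_legacy_files,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_example);
 *	}
 */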
1589 
1590 /**
1591  * blkcg_policy_unregister - unregister a blkcg policy
1592  * @pol: blkcg policy to unregister
1593  *
1594  * Undo blkcg_policy_register(@pol).  Might sleep.
1595  */
1596 void blkcg_policy_unregister(struct blkcg_policy *pol)
1597 {
1598 	struct blkcg *blkcg;
1599 
1600 	mutex_lock(&blkcg_pol_register_mutex);
1601 
1602 	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1603 		goto out_unlock;
1604 
1605 	/* kill the intf files first */
1606 	if (pol->dfl_cftypes)
1607 		cgroup_rm_cftypes(pol->dfl_cftypes);
1608 	if (pol->legacy_cftypes)
1609 		cgroup_rm_cftypes(pol->legacy_cftypes);
1610 
1611 	/* remove cpds and unregister */
1612 	mutex_lock(&blkcg_pol_mutex);
1613 
1614 	if (pol->cpd_free_fn) {
1615 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1616 			if (blkcg->cpd[pol->plid]) {
1617 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1618 				blkcg->cpd[pol->plid] = NULL;
1619 			}
1620 		}
1621 	}
1622 	blkcg_policy[pol->plid] = NULL;
1623 
1624 	mutex_unlock(&blkcg_pol_mutex);
1625 out_unlock:
1626 	mutex_unlock(&blkcg_pol_register_mutex);
1627 }
1628 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1629 
1630 /*
1631  * Scale the accumulated delay based on how long it has been since we updated
1632  * the delay.  We only call this when we are adding delay, in case it's been a
1633  * while since we added delay, and when we are checking to see if we need to
1634  * delay a task, to account for any delays that may have occurred.
1635  */
1636 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1637 {
1638 	u64 old = atomic64_read(&blkg->delay_start);
1639 
1640 	/*
1641 	 * We only want to scale down every second.  The idea here is that we
1642 	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1643 	 * time window.  We only want to throttle tasks for recent delay that
1644 	 * has occurred, in 1 second time windows since that's the maximum
1645 	 * things can be throttled.  We save the current delay window in
1646 	 * blkg->last_delay so we know what amount is still left to be charged
1647 	 * to the blkg from this point onward.  blkg->last_use keeps track of
1648 	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1649 	 * are ok with whatever is happening now, and we can take away more of
1650 	 * the accumulated delay as we've already throttled enough that
1651 	 * everybody is happy with their IO latencies.
1652 	 */
1653 	if (time_before64(old + NSEC_PER_SEC, now) &&
1654 	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1655 		u64 cur = atomic64_read(&blkg->delay_nsec);
1656 		u64 sub = min_t(u64, blkg->last_delay, now - old);
1657 		int cur_use = atomic_read(&blkg->use_delay);
1658 
1659 		/*
1660 		 * We've been unthrottled, subtract a larger chunk of our
1661 		 * accumulated delay.
1662 		 */
1663 		if (cur_use < blkg->last_use)
1664 			sub = max_t(u64, sub, blkg->last_delay >> 1);
1665 
1666 		/*
1667 		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1668 		 * should only ever be growing except here where we subtract out
1669 		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1670 		 * rather not end up with negative numbers.
1671 		 */
1672 		if (unlikely(cur < sub)) {
1673 			atomic64_set(&blkg->delay_nsec, 0);
1674 			blkg->last_delay = 0;
1675 		} else {
1676 			atomic64_sub(sub, &blkg->delay_nsec);
1677 			blkg->last_delay = cur - sub;
1678 		}
1679 		blkg->last_use = cur_use;
1680 	}
1681 }
1682 
1683 /*
1684  * This is called when we want to actually walk up the hierarchy and check to
1685  * see if we need to throttle, and then actually throttle if there is some
1686  * accumulated delay.  This should only be called upon return to user space so
1687  * we're not holding some lock that would induce a priority inversion.
1688  */
1689 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1690 {
1691 	u64 now = ktime_to_ns(ktime_get());
1692 	u64 exp;
1693 	u64 delay_nsec = 0;
1694 	int tok;
1695 
1696 	while (blkg->parent) {
1697 		if (atomic_read(&blkg->use_delay)) {
1698 			blkcg_scale_delay(blkg, now);
1699 			delay_nsec = max_t(u64, delay_nsec,
1700 					   atomic64_read(&blkg->delay_nsec));
1701 		}
1702 		blkg = blkg->parent;
1703 	}
1704 
1705 	if (!delay_nsec)
1706 		return;
1707 
1708 	/*
1709 	 * Let's not sleep for all eternity if we've amassed a huge delay.
1710 	 * Swapping or metadata IO can accumulate 10's of seconds worth of
1711 	 * delay, and we want userspace to be able to do _something_ so cap the
1712 	 * delays at 0.25s.  If there's 10's of seconds worth of delay then
1713 	 * the tasks will be delayed for 0.25s for every syscall.
1714 	 */
1715 	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1716 
1717 	/*
1718 	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1719 	 * that hasn't landed upstream yet.  Once that stuff is in place we need
1720 	 * to do a psi_memstall_enter/leave if memdelay is set.
1721 	 */
1722 
1723 	exp = ktime_add_ns(now, delay_nsec);
1724 	tok = io_schedule_prepare();
1725 	do {
1726 		__set_current_state(TASK_KILLABLE);
1727 		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1728 			break;
1729 	} while (!fatal_signal_pending(current));
1730 	io_schedule_finish(tok);
1731 }
1732 
1733 /**
1734  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1735  *
1736  * This is only called if we've been marked with set_notify_resume().  Obviously
1737  * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1738  * check to see if current->throttle_queue is set and if not this doesn't do
1739  * anything.  This should only ever be called by the resume code; it's not meant
1740  * to be called by people willy-nilly as it will actually do the work to
1741  * throttle the task if it is set up for throttling.
1742  */
1743 void blkcg_maybe_throttle_current(void)
1744 {
1745 	struct request_queue *q = current->throttle_queue;
1746 	struct cgroup_subsys_state *css;
1747 	struct blkcg *blkcg;
1748 	struct blkcg_gq *blkg;
1749 	bool use_memdelay = current->use_memdelay;
1750 
1751 	if (!q)
1752 		return;
1753 
1754 	current->throttle_queue = NULL;
1755 	current->use_memdelay = false;
1756 
1757 	rcu_read_lock();
1758 	css = kthread_blkcg();
1759 	if (css)
1760 		blkcg = css_to_blkcg(css);
1761 	else
1762 		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1763 
1764 	if (!blkcg)
1765 		goto out;
1766 	blkg = blkg_lookup(blkcg, q);
1767 	if (!blkg)
1768 		goto out;
1769 	blkg = blkg_try_get(blkg);
1770 	if (!blkg)
1771 		goto out;
1772 	rcu_read_unlock();
1773 
1774 	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1775 	blkg_put(blkg);
1776 	blk_put_queue(q);
1777 	return;
1778 out:
1779 	rcu_read_unlock();
1780 	blk_put_queue(q);
1781 }
1782 EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
1783 
1784 /**
1785  * blkcg_schedule_throttle - this task needs to check for throttling
1786  * @q: the request queue IO was submitted on
1787  * @use_memdelay: do we charge this to memory delay for PSI
1788  *
1789  * This is called by the IO controller when we know there's delay accumulated
1790  * for the blkg for this task.  We do not pass the blkg because there are places
1791  * we call this that may not have that information; the swapping code, for
1792  * instance, will only have a request_queue at that point.  This sets the
1793  * notify_resume for the task to check and see if it requires throttling before
1794  * returning to user space.
1795  *
1796  * We will only schedule once per syscall.  You can call this over and over
1797  * again and it will only do the check once upon return to user space, and only
1798  * throttle once.  If the task needs to be throttled again it'll need to be
1799  * re-set at the next time we see the task.
1800  */
1801 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1802 {
1803 	if (unlikely(current->flags & PF_KTHREAD))
1804 		return;
1805 
1806 	if (!blk_get_queue(q))
1807 		return;
1808 
1809 	if (current->throttle_queue)
1810 		blk_put_queue(current->throttle_queue);
1811 	current->throttle_queue = q;
1812 	if (use_memdelay)
1813 		current->use_memdelay = use_memdelay;
1814 	set_notify_resume(current);
1815 }
1816 EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
1817 
1818 /**
1819  * blkcg_add_delay - add delay to this blkg
1820  * @now: the current time in nanoseconds
1821  * @delta: how many nanoseconds of delay to add
1822  *
1823  * Charge @delta to the blkg's current delay accumulation.  This is used to
1824  * throttle tasks if an IO controller thinks we need more throttling.
1825  */
1826 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1827 {
1828 	blkcg_scale_delay(blkg, now);
1829 	atomic64_add(delta, &blkg->delay_nsec);
1830 }
1831 EXPORT_SYMBOL_GPL(blkcg_add_delay);
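/*
 * Sketch of how an IO controller might combine blkcg_add_delay() and
 * blkcg_schedule_throttle(); how "delta" is derived is left hypothetical:
 *
 *	u64 now = ktime_to_ns(ktime_get());
 *
 *	blkcg_add_delay(blkg, now, delta);
 *	blkcg_schedule_throttle(blkg->q, false);
 */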
1832 
1833 module_param(blkcg_debug_stats, bool, 0644);
1834 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1835