/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
 * recursive ones.  Used to carry stats of dead children, and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
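
/*
 * For illustration only (not part of this header): a hypothetical "foo"
 * policy could embed blkg_policy_data at the start of its own per-blkg
 * structure, per the convention above, and hand the embedded member back
 * from its pd_alloc_fn():
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	(embedded first)
 *		u64			foo_budget;
 *	};
 *
 *	static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct foo_grp *fg = kzalloc_node(sizeof(*fg), gfp, node);
 *
 *		return fg ? &fg->pd : NULL;
 *	}
 *
 * The matching pd_free_fn() would kfree() the containing foo_grp.
 */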

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
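
/*
 * Sketch (not part of this header) of how a hypothetical "foo" policy might
 * wire up its callbacks and register itself; existing policies such as
 * blk-throttle follow this shape from their init code:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 * blkcg_activate_policy() is then called per request_queue (typically from
 * the policy's per-queue init path) to allocate pds on that queue's blkgs.
 */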

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
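
/*
 * Typical shape of a policy's cgroup-file write handler using the pair
 * above (a hedged sketch with hypothetical names, modelled on existing
 * policies such as blk-throttle):
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		... parse ctx.body and update the policy data of ctx.blkg ...
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 *
 * blkg_conf_prep() parses the leading MAJ:MIN from the input, looks up or
 * creates the matching blkg and leaves the rest of the line in ctx.body;
 * blkg_conf_finish() drops the locks and references it took.
 */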

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	css = kthread_blkcg();
	if (css)
		return css_to_blkcg(css);
	return css_to_blkcg(task_css(current, io_cgrp_id));
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target bio
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and, if it returns true, run with the root blkg for that queue and do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
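
/*
 * Policies usually wrap blkg_to_pd()/pd_to_blkg() in small helpers that
 * convert to and from their own embedding structure.  A hedged sketch,
 * reusing the hypothetical foo_grp from the comment near blkg_policy_data:
 *
 *	static inline struct foo_grp *pd_to_fg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 *
 *	static inline struct foo_grp *blkg_to_fg(struct blkcg_gq *blkg)
 *	{
 *		return pd_to_fg(blkg_to_pd(blkg, &blkcg_policy_foo));
 *	}
 */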

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
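
/*
 * Example shape of an iteration using the macro above (a sketch; the loop
 * body and the parent_blkg name are hypothetical):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg) {
 *		... propagate a per-blkg setting from the parent to blkg ...
 *	}
 *	rcu_read_unlock();
 */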

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, &blkcg->css);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */