/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
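
/*
 * For example, a controller can branch on the hierarchy mode cheaply.  A
 * minimal hedged sketch (memory_cgrp_subsys is the symbol generated from
 * cgroup_subsys.h; the parse_*() helpers are hypothetical):
 *
 *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 *		ret = parse_v2_interface(buf);	// cgroup2 semantics
 *	else
 *		ret = parse_v1_interface(buf);	// legacy semantics
 */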

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
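
/*
 * A minimal sketch of consuming the iterator, assuming the caller already
 * holds a reference on @css (the pr_info() is illustrative only):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		pr_info("leader %d\n", task_pid_nr(task));
 *	css_task_iter_end(&it);
 */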

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
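
/*
 * Example sketch: count children that have completed ->css_online() while
 * holding the RCU read lock.  CSS_ONLINE is the flag from cgroup-defs.h;
 * without extra synchronization the count is only a hint:
 *
 *	struct cgroup_subsys_state *pos;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent)
 *		if (pos->flags & CSS_ONLINE)
 *			nr_online++;
 *	rcu_read_unlock();
 */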

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
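
/*
 * A hedged C rendition of the my_update_state() pattern above, assuming a
 * hypothetical foo subsystem whose state embeds the css as ->css and which
 * provides its own ->lock and a parent_foo() accessor:
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, css) {
 *		struct foo *f = container_of(pos, struct foo, css);
 *
 *		spin_lock(&f->lock);
 *		if (pos == css)
 *			f->state = new_state;
 *		else
 *			f->state = parent_foo(f)->state;
 *		spin_unlock(&f->lock);
 *	}
 *	rcu_read_unlock();
 */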

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
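
/*
 * Typical use is from a subsystem's ->attach() callback.  A hedged sketch
 * (foo_attach() and foo_charge_task() are hypothetical):
 *
 *	static void foo_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *dst_css;
 *
 *		cgroup_taskset_for_each(task, dst_css, tset)
 *			foo_charge_task(dst_css, task);
 *	}
 */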

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
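
/*
 * The canonical pattern: RCU protects the lookup and a successful tryget
 * converts it into a proper reference.  A sketch, where foo_lookup_css()
 * is a hypothetical RCU-safe lookup:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = foo_lookup_css(key);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *	// if non-NULL, css is now pinned until the matching css_put()
 */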

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
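
/*
 * For example, an interface file handler may want writes to fail once
 * removal has been committed even though the css hasn't been offlined
 * yet.  A hedged sketch:
 *
 *	if (css_is_dying(css))
 *		return -ENODEV;	// racing with rmdir, reject the operation
 */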

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
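
/*
 * A minimal usage sketch: pin @task's cpu css across an operation that may
 * sleep (cpu_cgrp_id is generated from cgroup_subsys.h; do_something() is
 * hypothetical):
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(task, cpu_cgrp_id);
 *	do_something(css);
 *	css_put(css);
 */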

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it
 * exists and return pointer to it.  Return NULL if @cgrp doesn't have
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	struct cgroup *ptr;

	if (cgrp->level < ancestor_level)
		return NULL;

	for (ptr = cgrp;
	     ptr && ptr->level > ancestor_level;
	     ptr = cgroup_parent(ptr))
		;

	if (ptr && ptr->level == ancestor_level)
		return ptr;

	return NULL;
}
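
/*
 * For example, locating the top-level (level 1, i.e. child-of-root)
 * ancestor of an arbitrary cgroup.  A sketch:
 *
 *	struct cgroup *top = cgroup_ancestor(cgrp, 1);
 *
 *	if (!top)
 *		return;	// cgrp is the root itself (level 0)
 */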

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of
 * @ancestor.  It follows all the same rules as cgroup_is_descendant() and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
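
/*
 * A sketch of grabbing a cgroup's path into a stack buffer.  Like the
 * kernfs counterpart it wraps, cgroup_path() returns the would-be length,
 * so truncation can be detected (the buffer size is illustrative):
 *
 *	char buf[PATH_MAX];
 *	int len;
 *
 *	len = cgroup_path(cgrp, buf, sizeof(buf));
 *	if (len >= 0 && len < sizeof(buf))
 *		pr_info("cgroup: %s\n", buf);
 */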

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}
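
/*
 * These helpers are meant for scheduler accounting paths.  A hedged sketch
 * of a runtime update feeding them (illustrative, not the scheduler's
 * exact code):
 *
 *	u64 delta_exec = now - p->se.exec_start;
 *
 *	cgroup_account_cputime(p, delta_exec);
 */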

#else	/* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif	/* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 3)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}
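
/*
 * A short sketch of the expected pairing, e.g. when handing the namespace
 * to a new user (the surrounding copy path is illustrative):
 *
 *	get_cgroup_ns(ns);	// +1 for the new user
 *	...
 *	put_cgroup_ns(ns);	// frees via free_cgroup_ns() at zero
 */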

#endif /* _LINUX_CGROUP_H */