/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic events, which is more straightforward than using jiffies
 * etc. (a sketch of how the targets below are consumed follows the enum).
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
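
/*
 * A minimal sketch of how these targets are typically consumed; the real
 * logic lives in mm/memcontrol.c, and the interval THRESHOLDS_EVENTS_TARGET
 * is cited from there for illustration only. Simplified:
 *
 *	static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 *				enum mem_cgroup_events_target target)
 *	{
 *		unsigned long val, next;
 *
 *		val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
 *		next = __this_cpu_read(memcg->stat_cpu->targets[target]);
 *		if ((long)(next - val) < 0) {
 *			// target passed: rearm one interval further out
 *			next = val + THRESHOLDS_EVENTS_TARGET;
 *			__this_cpu_write(memcg->stat_cpu->targets[target], next);
 *			return true;	// fire the periodic event
 *		}
 *		return false;
 *	}
 */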

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};
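
/*
 * A hedged sketch of how the map above is consumed on the reclaim side
 * (the actual walk lives in mm/vmscan.c; "shrinker_nr_max" names the
 * current bitmap width there and is used here for illustration only):
 *
 *	rcu_read_lock();
 *	map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
 *	for_each_set_bit(i, map->map, shrinker_nr_max) {
 *		// run the memcg-aware shrinker with this id; the bit is
 *		// cleared once the shrinker reports the memcg empty
 *	}
 *	rcu_read_unlock();
 */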

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu	*shrinker_map;
#endif
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess; /* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	bool			congested;	/* memcg has many dirty pages */
						/* backed by a congested BDI */

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
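
/*
 * A hedged sketch of why the spare array makes unregistration infallible:
 * the unregister path in mm/memcontrol.c rebuilds the surviving entries
 * into the preallocated spare and then swaps the two pointers, so it never
 * has to allocate. Simplified:
 *
 *	mutex_lock(&memcg->thresholds_lock);
 *	new = thresholds->spare;		// preallocated earlier
 *	// ...copy all surviving entries from thresholds->primary...
 *	rcu_assign_pointer(thresholds->primary, new);
 *	thresholds->spare = old_primary;	// reused next time
 *	mutex_unlock(&memcg->thresholds_lock);
 */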

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

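/*
 * Zero-size padding: on SMP, ____cacheline_internodealigned_in_smp pushes
 * the *next* struct member onto a fresh internode cacheline, keeping hot
 * read-mostly and write-heavy fields of struct mem_cgroup from false
 * sharing. On UP it compiles away to nothing.
 */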
#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)      struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this memcg
	 * if it has to kill any one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		stat[MEMCG_NR_STAT];
	atomic_long_t		events[NR_VM_EVENT_ITEMS];
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: bigger batches may be necessary on large machines.
 */
#define MEMCG_CHARGE_BATCH 32U
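
/*
 * A hedged illustration of where the batch size matters: in the charge
 * path (try_charge() in mm/memcontrol.c) it sizes the per-cpu "stock" of
 * precharged pages, and below it bounds the per-cpu error of the
 * statistics counters. Simplified from the charge path:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			// served from the cpu stock
 *	batch = max(MEMCG_CHARGE_BATCH, nr_pages);
 *	page_counter_try_charge(&memcg->memory, batch, &counter);
 *	refill_stock(memcg, batch - nr_pages);	// keep the surplus per-cpu
 */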

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
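
/*
 * A hedged sketch of the two-phase charge protocol formed by the
 * declarations above, mirroring how the anonymous-fault path in
 * mm/memory.c uses it (error labels are illustrative):
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto oom;
 *	// ...make the page visible (page tables / page cache)...
 *	if (insertion failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto release;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */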

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg node
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
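
/*
 * Illustrative (non-authoritative) use of the helper above from a
 * reclaim-style caller:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long nr = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
 *
 * With the memory controller disabled this transparently degrades to the
 * node-wide lruvec, so callers need not special-case !CONFIG_MEMCG setups.
 */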

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
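
/*
 * A hedged sketch of the canonical hierarchy walk built on the iterator
 * above (the same shape reclaim uses; a partial iteration must end with
 * mem_cgroup_iter_break() to drop the reference):
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		// ...operate on iter...
 *		if (need to stop early) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	} while ((iter = mem_cgroup_iter(root, iter, NULL)));
 */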

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
			      struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				struct task_struct *p);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
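	/* per-cpu batching may leave the global sum transiently negative */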
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check that the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
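
/*
 * Illustrative usage under the constraints above (slab_mutex held; the
 * lookup helper cache_from_memcg_idx() lives in mm/slab.h and is cited
 * here as an assumption about the call site, not defined by this header):
 *
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(cachep, i);
 *
 *		if (!c)
 *			continue;
 *		// ...operate on the per-memcg child cache c...
 *	}
 *	mutex_unlock(&slab_mutex);
 */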

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */