/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;
/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;
	/*
	 * order == -1 means the oom kill is required by sysrq; otherwise the
	 * order is used only for display purposes.
	 */
	const int order;
	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};
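
/*
 * Illustrative sketch (not part of this header): a global OOM invocation is
 * expected to fill in the constraint fields above and leave the fields used
 * by the oom implementation zeroed.  The "ac" allocation context and the
 * local gfp_mask/order variables below are hypothetical, shown only to
 * demonstrate the shape of a caller:
 *
 *	struct oom_control oc = {
 *		.zonelist = ac->zonelist,
 *		.nodemask = ac->nodemask,
 *		.memcg = NULL,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 *
 *	if (mutex_trylock(&oom_lock)) {
 *		out_of_memory(&oc);
 *		mutex_unlock(&oom_lock);
 *	}
 */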

extern struct mutex oom_lock;
extern struct mutex oom_adj_mutex;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
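
/*
 * Illustrative sketch (not part of this header): a path that can transiently
 * consume large amounts of memory, such as swapoff, may mark itself as the
 * origin of that memory pressure so the oom killer prefers it as a victim
 * while the operation runs.  do_heavy_memory_op() is a hypothetical helper
 * used only for illustration:
 *
 *	set_current_oom_origin();
 *	err = do_heavy_memory_op();
 *	clear_current_oom_origin();
 *
 * Victim selection can then prefer tasks for which oom_task_origin()
 * returns true.
 */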

static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
	return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. This is guaranteed to stay true once set.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}
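
/*
 * Illustrative sketch (not part of this header): exit paths that tear down
 * an address space typically consult mm_is_oom_victim() to decide whether to
 * reap the mm themselves and tell the oom killer to skip it.  This is a
 * simplified sketch of such a caller, not a definitive implementation:
 *
 *	if (unlikely(mm_is_oom_victim(mm))) {
 *		(void)__oom_reap_task_mm(mm);
 *		set_bit(MMF_OOM_SKIP, &mm->flags);
 *	}
 */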

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Users should call this before establishing a page table entry for
 * a !shared mapping and under the proper page table lock.
 *
 * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}
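
/*
 * Illustrative sketch (not part of this header): an anonymous fault handler
 * is expected to perform this check with the page table lock held, before
 * installing the new PTE, and to back out on failure.  Names such as vmf
 * and entry below are placeholders for illustration only:
 *
 *	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
 *	ret = check_stable_address_space(mm);
 *	if (ret)
 *		goto unlock;		// do not install the PTE
 *	set_pte_at(mm, vmf->address, vmf->pte, entry);
 * unlock:
 *	pte_unmap_unlock(vmf->pte, vmf->ptl);
 */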

bool __oom_reap_task_mm(struct mm_struct *mm);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */