/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H


#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Details of the page allocation that triggered the oom killer and that are
 * used to determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;

	/* Used to print the constraint info. */
	enum oom_constraint constraint;
};
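
/*
 * Initialization sketch (hedged; loosely modelled on the page allocator's
 * OOM path, __alloc_pages_may_oom()): the caller fills in the allocation
 * context and leaves the implementation-owned fields zero-initialized
 * before handing the struct to out_of_memory(). "ac", "gfp_mask" and
 * "order" below are assumed caller-side names.
 *
 *	struct oom_control oc = {
 *		.zonelist = ac->zonelist,
 *		.nodemask = ac->nodemask,
 *		.memcg = NULL,			// NULL => global OOM
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 */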

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}
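
/*
 * Usage pattern sketch (hedged): a task about to perform an operation that
 * can consume a large amount of memory on behalf of the system, e.g.
 * swapoff, marks itself as a preferred OOM victim for the duration of the
 * operation and clears the flag when done. The callee name is hypothetical.
 *
 *	set_current_oom_origin();
 *	err = do_memory_intensive_work();
 *	clear_current_oom_origin();
 */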

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}

static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
	return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. This is guaranteed to stay true once set.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Callers should use this before establishing a page table entry for
 * a !shared mapping, and under the proper page table lock.
 *
 * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}
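
/*
 * Usage sketch (hedged; modelled on an anonymous-fault handler such as
 * do_anonymous_page()): take the page table lock, check that the address
 * space is still stable, and only then install the new PTE. The fault
 * handler locals below ("vmf", "entry", "ret") are illustrative assumptions.
 *
 *	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
 *	ret = check_stable_address_space(mm);
 *	if (ret)
 *		goto unlock;		// fault fails with VM_FAULT_SIGBUS
 *	set_pte_at(mm, vmf->address, vmf->pte, entry);
 * unlock:
 *	pte_unmap_unlock(vmf->pte, vmf->ptl);
 */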

bool __oom_reap_task_mm(struct mm_struct *mm);

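/*
 * oom_badness() computes a heuristic "badness" score for @p: roughly the
 * task's memory footprint (rss, swap and page table pages) as a share of
 * @totalpages, adjusted by the task's oom_score_adj. The OOM killer
 * prefers the candidate with the highest score.
 */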
extern unsigned long oom_badness(struct task_struct *p,
		unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);
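
/*
 * Calling sketch (hedged; roughly what the page allocator's OOM path does):
 * out_of_memory() is invoked with oom_lock held and returns true when the
 * caller may expect forward progress (a victim was selected or memory was
 * freed by other means). "oc" is the oom_control built by the caller.
 *
 *	if (mutex_trylock(&oom_lock)) {
 *		ret = out_of_memory(&oc);
 *		mutex_unlock(&oom_lock);
 *	}
 */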

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
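
/*
 * Notifier sketch (hedged; callback and helper names are hypothetical):
 * the OOM notifier chain runs at the start of out_of_memory(), and a
 * callback may reclaim memory and account the number of freed pages via
 * the pointer passed as the last argument. If enough memory is reported
 * freed, the kill can be avoided.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *freed)
 *	{
 *		*(unsigned long *)freed += my_shrink_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */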

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
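
/*
 * Note (descriptive, hedged): oom_killer_disable() is meant for contexts
 * such as the suspend/hibernation freezer; it forbids new OOM kills and
 * waits up to @timeout for already selected victims to exit, returning
 * true on success. oom_killer_enable() lifts the restriction again.
 */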

extern struct task_struct *find_lock_task_mm(struct task_struct *p);
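
/*
 * Note (descriptive, hedged): find_lock_task_mm() returns a thread of @p
 * that still has a valid ->mm (or NULL if no such thread exists), with
 * task_lock() held on the returned thread; callers must task_unlock() it.
 */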

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */