]>
Commit | Line | Data |
---|---|---|
8ac773b4 AD |
1 | #ifndef __INCLUDE_LINUX_OOM_H |
2 | #define __INCLUDE_LINUX_OOM_H | |
3 | ||
5a3135c2 | 4 | |
a63d83f4 | 5 | #include <linux/sched.h> |
172acf60 | 6 | #include <linux/types.h> |
4365a567 | 7 | #include <linux/nodemask.h> |
607ca46e | 8 | #include <uapi/linux/oom.h> |
172acf60 DR |
9 | |
10 | struct zonelist; | |
11 | struct notifier_block; | |
74bcbf40 AM |
12 | struct mem_cgroup; |
13 | struct task_struct; | |
172acf60 | 14 | |
8989e4c7 DR |
15 | /* |
16 | * Details of the page allocation that triggered the oom killer that are used to | |
17 | * determine what should be killed. | |
18 | */ | |
6e0fc46d | 19 | struct oom_control { |
8989e4c7 | 20 | /* Used to determine cpuset */ |
6e0fc46d | 21 | struct zonelist *zonelist; |
8989e4c7 DR |
22 | |
23 | /* Used to determine mempolicy */ | |
24 | nodemask_t *nodemask; | |
25 | ||
2a966b77 VD |
26 | /* Memory cgroup in which oom is invoked, or NULL for global oom */ |
27 | struct mem_cgroup *memcg; | |
28 | ||
8989e4c7 DR |
29 | /* Used to determine cpuset and node locality requirement */ |
30 | const gfp_t gfp_mask; | |
31 | ||
32 | /* | |
33 | * order == -1 means the oom kill is required by sysrq, otherwise only | |
34 | * for display purposes. | |
35 | */ | |
36 | const int order; | |
6e0fc46d DR |
37 | }; |
38 | ||
70e24bdf DR |
/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
	CONSTRAINT_NONE,		/* no restriction on node usage */
	CONSTRAINT_CPUSET,		/* allocation constrained by cpuset */
	CONSTRAINT_MEMORY_POLICY,	/* allocation constrained by mempolicy */
	CONSTRAINT_MEMCG,		/* allocation constrained by memcg limit */
};
48 | ||
9cbb78bb DR |
/* Verdict of the oom killer's per-thread scan. */
enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};
55 | ||
dc56401f JW |
56 | extern struct mutex oom_lock; |
57 | ||
e1e12d2f DR |
58 | static inline void set_current_oom_origin(void) |
59 | { | |
c96fc2d8 | 60 | current->signal->oom_flag_origin = true; |
e1e12d2f DR |
61 | } |
62 | ||
63 | static inline void clear_current_oom_origin(void) | |
64 | { | |
c96fc2d8 | 65 | current->signal->oom_flag_origin = false; |
e1e12d2f DR |
66 | } |
67 | ||
68 | static inline bool oom_task_origin(const struct task_struct *p) | |
69 | { | |
c96fc2d8 | 70 | return p->signal->oom_flag_origin; |
e1e12d2f | 71 | } |
72788c38 | 72 | |
16e95196 | 73 | extern void mark_oom_victim(struct task_struct *tsk); |
49550b60 | 74 | |
3ef22dff MH |
#ifdef CONFIG_MMU
extern void try_oom_reaper(struct task_struct *tsk);
#else
/* Without an MMU there is no oom reaper; make the call a no-op. */
static inline void try_oom_reaper(struct task_struct *tsk)
{
}
#endif
82 | ||
a7f638f9 DR |
83 | extern unsigned long oom_badness(struct task_struct *p, |
84 | struct mem_cgroup *memcg, const nodemask_t *nodemask, | |
85 | unsigned long totalpages); | |
5695be14 | 86 | |
6e0fc46d | 87 | extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, |
9cbb78bb | 88 | unsigned int points, unsigned long totalpages, |
2a966b77 | 89 | const char *message); |
9cbb78bb | 90 | |
6e0fc46d | 91 | extern void check_panic_on_oom(struct oom_control *oc, |
2a966b77 | 92 | enum oom_constraint constraint); |
876aafbf | 93 | |
6e0fc46d DR |
94 | extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, |
95 | struct task_struct *task, unsigned long totalpages); | |
9cbb78bb | 96 | |
6e0fc46d | 97 | extern bool out_of_memory(struct oom_control *oc); |
16e95196 | 98 | |
36324a99 | 99 | extern void exit_oom_victim(struct task_struct *tsk); |
16e95196 | 100 | |
5a3135c2 DR |
101 | extern int register_oom_notifier(struct notifier_block *nb); |
102 | extern int unregister_oom_notifier(struct notifier_block *nb); | |
103 | ||
1a8670a2 | 104 | extern bool oom_killer_disabled; |
c32b3cbe MH |
105 | extern bool oom_killer_disable(void); |
106 | extern void oom_killer_enable(void); | |
8e4228e1 | 107 | |
158e0a2d KH |
108 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); |
109 | ||
d003f371 ON |
110 | static inline bool task_will_free_mem(struct task_struct *task) |
111 | { | |
98748bd7 MH |
112 | struct signal_struct *sig = task->signal; |
113 | ||
d003f371 ON |
114 | /* |
115 | * A coredumping process may sleep for an extended period in exit_mm(), | |
116 | * so the oom killer cannot assume that the process will promptly exit | |
117 | * and release memory. | |
118 | */ | |
98748bd7 MH |
119 | if (sig->flags & SIGNAL_GROUP_COREDUMP) |
120 | return false; | |
121 | ||
122 | if (!(task->flags & PF_EXITING)) | |
123 | return false; | |
124 | ||
125 | /* Make sure that the whole thread group is going down */ | |
126 | if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT)) | |
127 | return false; | |
128 | ||
129 | return true; | |
d003f371 ON |
130 | } |
131 | ||
8e4228e1 DR |
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */