/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

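/*
 * Tunables exposed via /proc/sys/vm/: panic_on_oom panics instead of
 * killing, oom_kill_allocating_task kills current instead of scanning
 * the task list, and oom_dump_tasks controls the per-task memory dump
 * that accompanies a kill.
 */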
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk
 * (any thread of @start), shares the same mempolicy nodes as current if it
 * is bound by such a policy and whether or not it has the same set of
 * allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant.  Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        ret = cpuset_mems_allowed_intersects(current, tsk);
                }
                if (ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}

/* return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() is called and p is not a member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
                atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
        task_unlock(p);

        /*
         * Root processes get a 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= (points * 3) / 100;

        /*
         * Normalize to oom_score_adj units: oom_score_adj lies in
         * [-1000, 1000], so each unit is worth totalpages / 1000 pages.
         */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        bool cpuset_limited = false;
        int nid;

        /* Default to all available memory */
        *totalpages = totalram_pages + total_swap_pages;

        if (!zonelist)
                return CONSTRAINT_NONE;
        /*
         * Reach here only when __GFP_NOFAIL is used.  So, we should avoid
         * killing current; we have to kill a random task in this case.
         * Ideally this would be CONSTRAINT_THISNODE, but there is no way
         * to handle that now.
         */
        if (gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect.  Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, *nodemask)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check if this allocation failure is caused by the cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        high_zoneidx, nodemask)
                if (!cpuset_zone_allowed(zone, gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
        *totalpages = totalram_pages + total_swap_pages;
        return CONSTRAINT_NONE;
}
#endif

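/*
 * Classify @task for select_bad_process(): OOM_SCAN_OK means score it,
 * OOM_SCAN_CONTINUE skips it, OOM_SCAN_SELECT picks it outright, and
 * OOM_SCAN_ABORT stops the scan because a victim is already on its way
 * out and may free memory on its own.
 */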
enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
		unsigned long totalpages, const nodemask_t *nodemask,
		bool force_kill)
{
        if (oom_unkillable_task(task, NULL, nodemask))
                return OOM_SCAN_CONTINUE;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves.
         */
        if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
                if (!force_kill)
                        return OOM_SCAN_ABORT;
        }
        if (!task->mm)
                return OOM_SCAN_CONTINUE;

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task))
                return OOM_SCAN_SELECT;

        if (task_will_free_mem(task) && !force_kill)
                return OOM_SCAN_ABORT;

        return OOM_SCAN_OK;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'.  Returns (struct task_struct *)(-1UL) on scan abort.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, const nodemask_t *nodemask,
		bool force_kill)
{
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
        unsigned long chosen_points = 0;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                unsigned int points;

                switch (oom_scan_process_thread(p, totalpages, nodemask,
                                                force_kill)) {
                case OOM_SCAN_SELECT:
                        chosen = p;
                        chosen_points = ULONG_MAX;
                        /* fall through */
                case OOM_SCAN_CONTINUE:
                        continue;
                case OOM_SCAN_ABORT:
                        rcu_read_unlock();
                        return (struct task_struct *)(-1UL);
                case OOM_SCAN_OK:
                        break;
                }
                points = oom_badness(p, NULL, nodemask, totalpages);
                if (!points || points < chosen_points)
                        continue;
                /* Prefer thread group leaders for display purposes */
                if (points == chosen_points && thread_group_leader(chosen))
                        continue;

                chosen = p;
                chosen_points = points;
        }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();

        *ppoints = chosen_points * 1000 / totalpages;
        return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's.  There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        atomic_long_read(&task->mm->nr_ptes),
                        mm_nr_pmds(task->mm),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

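/*
 * Print the allocation that triggered the OOM, the current memory state
 * (system-wide or memcg-scoped) and, if oom_dump_tasks is set, the
 * eligible task list.
 */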
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
                "oom_score_adj=%hd\n",
                current->comm, gfp_mask, order,
                current->signal->oom_score_adj);
        cpuset_print_task_mems_allowed(current);
        task_unlock(current);
        dump_stack();
        if (memcg)
                mem_cgroup_print_oom_info(memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES);
        if (sysctl_oom_dump_tasks)
                dump_tasks(memcg, nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

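/*
 * oom_killer_disabled is only flipped with oom_sem held for write; the
 * kill paths run with it held for read, so a kill can never race with
 * oom_killer_disable().
 */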
bool oom_killer_disabled __read_mostly;
static DECLARE_RWSEM(oom_sem);

/**
 * mark_tsk_oom_victim - marks the given task as an OOM victim.
 * @tsk: task to mark
 *
 * Has to be called with oom_sem taken for read and never after
 * the OOM killer has been disabled.
 */
void mark_tsk_oom_victim(struct task_struct *tsk)
{
        WARN_ON(oom_killer_disabled);
        /* OOM killer might race with memcg OOM */
        if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
                return;
        /*
         * Make sure that the task is woken up from uninterruptible sleep
         * if it is frozen because OOM killer wouldn't be able to free
         * any memory and livelock.  freezing_slow_path will tell the freezer
         * that TIF_MEMDIE tasks should be ignored.
         */
        __thaw_task(tsk);
        atomic_inc(&oom_victims);
}

/**
 * unmark_oom_victim - unmarks the current task as an OOM victim.
 *
 * Wakes up all waiters in oom_killer_disable()
 */
void unmark_oom_victim(void)
{
        if (!test_and_clear_thread_flag(TIF_MEMDIE))
                return;

        down_read(&oom_sem);
        /*
         * There is no need to signal the last oom_victim if there
         * is nobody who cares.
         */
        if (!atomic_dec_return(&oom_victims) && oom_killer_disabled)
                wake_up_all(&oom_victims_wait);
        up_read(&oom_sem);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result.  Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
        /*
         * Make sure to not race with an ongoing OOM killer
         * and that current is not the victim.
         */
        down_write(&oom_sem);
        if (test_thread_flag(TIF_MEMDIE)) {
                up_write(&oom_sem);
                return false;
        }

        oom_killer_disabled = true;
        up_write(&oom_sem);

        wait_event(oom_victims_wait, !atomic_read(&oom_victims));

        return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
        down_write(&oom_sem);
        oom_killer_disabled = false;
        up_write(&oom_sem);
}

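/* Convert a page count to kilobytes for the kill messages below. */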
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, nodemask_t *nodemask,
		      const char *message)
{
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        task_lock(p);
        if (p->mm && task_will_free_mem(p)) {
                mark_tsk_oom_victim(p);
                task_unlock(p);
                put_task_struct(p);
                return;
        }
        task_unlock(p);

        if (__ratelimit(&oom_rs))
                dump_header(p, gfp_mask, order, memcg, nodemask);

        task_lock(p);
        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);
        task_unlock(p);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (child->mm == p->mm)
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child, memcg, nodemask,
                                                   totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        }
        read_unlock(&tasklist_lock);

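        /*
         * The chosen victim may itself be exiting; re-resolve it to a live
         * thread that still owns an mm and move our reference over to it.
         */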
        p = find_lock_task_mm(victim);
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* mm cannot safely be dereferenced after task_unlock(victim) */
        mm = victim->mm;
        mark_tsk_oom_victim(victim);
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups, if
         * any.  They don't get access to memory reserves, though, to avoid
         * depletion of all memory.  This prevents mm->mmap_sem livelock when an
         * oom killed thread cannot exit because it requires the semaphore and
         * it's contended by another thread trying to allocate memory itself.
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
        rcu_read_lock();
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
                        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
                                continue;

                        task_lock(p);	/* Protect ->comm from prctl() */
                        pr_err("Kill process %d (%s) sharing same memory\n",
                                task_pid_nr(p), p->comm);
                        task_unlock(p);
                        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
                }
        rcu_read_unlock();

        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
			int order, const nodemask_t *nodemask)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        dump_header(NULL, gfp_mask, order, NULL, nodemask);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

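/*
 * OOM notifier chain: callbacks registered here run at the start of
 * out_of_memory() and may report memory they freed through the chain
 * argument, letting the OOM killer bail out without killing anything.
 */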
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns
 * false if a parallel OOM killing is already taking place that includes a
 * zone in the zonelist.  Otherwise, locks all zones in the zonelist and
 * returns true.
 */
bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;
        bool ret = true;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
                        ret = false;
                        goto out;
                }

        /*
         * Lock each zone in the zonelist under zone_scan_lock so a parallel
         * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
         */
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                set_bit(ZONE_OOM_LOCKED, &zone->flags);

out:
        spin_unlock(&zone_scan_lock);
        return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                clear_bit(ZONE_OOM_LOCKED, &zone->flags);
        spin_unlock(&zone_scan_lock);
}

/**
 * __out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask, bool force_kill)
{
        const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
        unsigned int uninitialized_var(points);
        enum oom_constraint constraint = CONSTRAINT_NONE;
        int killed = 0;

        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
                /* Got some memory back in the last second. */
                return;

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it.  The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         *
         * But don't select if current has already released its mm and cleared
         * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
         */
        if (current->mm &&
            (fatal_signal_pending(current) || task_will_free_mem(current))) {
                mark_tsk_oom_victim(current);
                return;
        }

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA) that may require different handling.
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                       &totalpages);
        mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
        check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

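        /*
         * With oom_kill_allocating_task set, kill current outright (when it
         * is killable) rather than scanning the whole task list.
         */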
        if (sysctl_oom_kill_allocating_task && current->mm &&
            !oom_unkillable_task(current, NULL, nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
                                 nodemask,
                                 "Out of memory (oom_kill_allocating_task)");
                goto out;
        }

        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                panic("Out of memory and no killable processes...\n");
        }
        if (p != (void *)-1UL) {
                oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                                 nodemask, "Out of memory");
                killed = 1;
        }
out:
        /*
         * Give the killed threads a good chance of exiting before trying to
         * allocate memory again.
         */
        if (killed)
                schedule_timeout_killable(1);
}

/**
 * out_of_memory - tries to invoke the OOM killer.
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * Invokes __out_of_memory() and returns true unless the OOM killer has
 * been disabled by oom_killer_disable(), in which case it returns false.
 */
bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask, bool force_kill)
{
        bool ret = false;

        down_read(&oom_sem);
        if (!oom_killer_disabled) {
                __out_of_memory(zonelist, gfp_mask, order, nodemask, force_kill);
                ret = true;
        }
        up_read(&oom_sem);

        return ret;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct zonelist *zonelist;

        down_read(&oom_sem);
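        /*
         * Let the memcg OOM handler settle things first; if it handled the
         * fault's OOM, there is nothing left for us to do.
         */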
        if (mem_cgroup_oom_synchronize(true))
                goto unlock;

        zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
        if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
                if (!oom_killer_disabled)
                        __out_of_memory(NULL, 0, 0, NULL, false);
                else
                        /*
                         * There shouldn't be any user tasks runnable while
                         * the OOM killer is disabled, so the current task
                         * has to be a racing OOM victim for which
                         * oom_killer_disable() is waiting.
                         */
                        WARN_ON(test_thread_flag(TIF_MEMDIE));

                oom_zonelist_unlock(zonelist, GFP_KERNEL);
        }
unlock:
        up_read(&oom_sem);
}