// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
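
/*
 * These three knobs are exposed to userspace as the vm.panic_on_oom,
 * vm.oom_kill_allocating_task and vm.oom_dump_tasks sysctls; see
 * Documentation/admin-guide/sysctl/vm.rst.
 */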
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overeager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

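/*
 * Note for callers: a non-NULL result from find_lock_task_mm() is returned
 * with task_lock() held on that thread, so it must be paired with
 * task_unlock() once ->mm has been inspected (see oom_badness() below for
 * a typical use).
 */
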
/*
 * order == -1 means the oom kill is required by sysrq; otherwise the order
 * is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Print out unreclaimable slab info when the unreclaimable slab amount is
 * greater than all user memory (LRU pages).
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate the badness score
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

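/*
 * Worked example (illustrative only, not part of the original source):
 * with totalpages = 4,000,000 and a task whose rss + swap + pagetables
 * add up to 1,000,000 pages, an oom_score_adj of 500 contributes
 * 500 * (4,000,000 / 1000) = 2,000,000, giving points = 3,000,000.
 * A task with oom_score_adj == OOM_SCORE_ADJ_MIN never gets this far;
 * it returns LONG_MIN above and is skipped entirely.
 */
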
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current. We would have to kill a random task in this case;
	 * CONSTRAINT_THISNODE would hopefully be the right answer, but there
	 * is no way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

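/*
 * Whichever constraint is detected above, the side effect is that
 * oc->totalpages now holds the amount of usable memory in that domain;
 * oom_badness() later uses it to normalize oom_score_adj.
 */
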
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* task may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

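/*
 * A non-zero return from oom_evaluate_task() stops the scan early; in that
 * case oc->chosen holds the sentinel -1UL, which lets callers distinguish
 * "scan aborted because an earlier victim is still exiting" from "no
 * eligible victim found" (oc->chosen == NULL).
 */
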
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * This is a kthread or all of p's threads have already
		 * detached their mm's. There's no need to report
		 * them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

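/* K() converts a page count into kilobytes (pages * PAGE_SIZE / 1024). */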
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm, range.start, range.end);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb, range.start, range.end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb, range.start, range.end);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

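/*
 * Note that wake_oom_reaper() below pushes a victim onto the head of
 * oom_reaper_list while oom_reaper() above also pops from the head, so
 * the queue is effectively LIFO.
 */
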
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either by holding task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

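/*
 * The typical user of the enable/disable pair above is the suspend and
 * hibernation path, which brackets the freezing of user space tasks with
 * oom_killer_disable()/oom_killer_enable().
 */
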
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (either by holding
 * task_lock or by operating on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any. They don't get access to memory reserves, though, to avoid
	 * depletion of all memory. This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself. That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void*)message);
		mem_cgroup_put(oom_group);
	}
}

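/*
 * The oom_group handling above implements the cgroup v2 memory.oom.group
 * semantics: when set, the workload is treated as an indivisible unit and
 * every task in the group is killed together.
 */
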
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

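/*
 * A minimal (hypothetical) notifier sketch: out_of_memory() invokes this
 * chain with a pointer to an unsigned long, and a callback reports how many
 * pages it freed by adding to it. try_to_free_something() is a made-up
 * placeholder for a driver's own cache-shrinking routine:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += try_to_free_something();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */
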
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}