mm/memory-failure.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows us to stop
 * using pages that are suspected (but not yet confirmed) to be corrupted,
 * without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

68#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69
1bfe5feb 70u32 hwpoison_filter_enable = 0;
7c116f2b
WF
71u32 hwpoison_filter_dev_major = ~0U;
72u32 hwpoison_filter_dev_minor = ~0U;
478c5ffc
WF
73u64 hwpoison_filter_flags_mask;
74u64 hwpoison_filter_flags_value;
1bfe5feb 75EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
7c116f2b
WF
76EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
77EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
478c5ffc
WF
78EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
79EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
7c116f2b
WF
80
81static int hwpoison_filter_dev(struct page *p)
82{
83 struct address_space *mapping;
84 dev_t dev;
85
86 if (hwpoison_filter_dev_major == ~0U &&
87 hwpoison_filter_dev_minor == ~0U)
88 return 0;
89
90 /*
1c80b990 91 * page_mapping() does not accept slab pages.
7c116f2b
WF
92 */
93 if (PageSlab(p))
94 return -EINVAL;
95
96 mapping = page_mapping(p);
97 if (mapping == NULL || mapping->host == NULL)
98 return -EINVAL;
99
100 dev = mapping->host->i_sb->s_dev;
101 if (hwpoison_filter_dev_major != ~0U &&
102 hwpoison_filter_dev_major != MAJOR(dev))
103 return -EINVAL;
104 if (hwpoison_filter_dev_minor != ~0U &&
105 hwpoison_filter_dev_minor != MINOR(dev))
106 return -EINVAL;
107
108 return 0;
109}
110
478c5ffc
WF
111static int hwpoison_filter_flags(struct page *p)
112{
113 if (!hwpoison_filter_flags_mask)
114 return 0;
115
116 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
117 hwpoison_filter_flags_value)
118 return 0;
119 else
120 return -EINVAL;
121}
122
4fd466eb
AK
123/*
124 * This allows stress tests to limit test scope to a collection of tasks
125 * by putting them under some memcg. This prevents killing unrelated/important
126 * processes such as /sbin/init. Note that the target task may share clean
127 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
130 * can only guarantee that the page either belongs to the memcg tasks, or is
131 * a freed page.
132 */
94a59fb3 133#ifdef CONFIG_MEMCG
4fd466eb
AK
134u64 hwpoison_filter_memcg;
135EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
136static int hwpoison_filter_task(struct page *p)
137{
4fd466eb
AK
138 if (!hwpoison_filter_memcg)
139 return 0;
140
94a59fb3 141 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
4fd466eb
AK
142 return -EINVAL;
143
144 return 0;
145}
146#else
147static int hwpoison_filter_task(struct page *p) { return 0; }
148#endif
149
7c116f2b
WF
150int hwpoison_filter(struct page *p)
151{
1bfe5feb
HL
152 if (!hwpoison_filter_enable)
153 return 0;
154
7c116f2b
WF
155 if (hwpoison_filter_dev(p))
156 return -EINVAL;
157
478c5ffc
WF
158 if (hwpoison_filter_flags(p))
159 return -EINVAL;
160
4fd466eb
AK
161 if (hwpoison_filter_task(p))
162 return -EINVAL;
163
7c116f2b
WF
164 return 0;
165}
27df5068
AK
166#else
167int hwpoison_filter(struct page *p)
168{
169 return 0;
170}
171#endif
172
7c116f2b
WF
173EXPORT_SYMBOL_GPL(hwpoison_filter);
174
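/*
 * Usage sketch for the filter knobs above (an assumption, not part of this
 * file: they are typically driven through the hwpoison injector built with
 * CONFIG_HWPOISON_INJECT, see mm/hwpoison-inject.c): a stress test enables
 * hwpoison_filter_enable, points hwpoison_filter_memcg at the memcg holding
 * its test tasks, and then injects errors by pfn. hwpoison_filter() above
 * then discards any injected error that does not hit a page owned by the
 * test group, protecting unrelated processes such as /sbin/init.
 */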
ae1139ec
DW
175/*
176 * Kill all processes that have a poisoned page mapped and then isolate
177 * the page.
178 *
179 * General strategy:
180 * Find all processes having the page mapped and kill them.
181 * But we keep a page reference around so that the page is not
182 * actually freed yet.
183 * Then stash the page away
184 *
185 * There's no convenient way to get back to mapped processes
186 * from the VMAs. So do a brute-force search over all
187 * running processes.
188 *
189 * Remember that machine checks are not common (or rather
190 * if they are common you have other problems), so this shouldn't
191 * be a performance issue.
192 *
193 * Also there are some races possible while we get from the
194 * error detection to actually handle it.
195 */
196
197struct to_kill {
198 struct list_head nd;
199 struct task_struct *tsk;
200 unsigned long addr;
201 short size_shift;
ae1139ec
DW
202};
203
/*
 * Send a signal to all processes that have the page mapped:
 * "action optional" if they are not immediately affected by the error,
 * "action required" if the error happened in the current execution context.
 */
ae1139ec 209static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
6a46079c 210{
ae1139ec
DW
211 struct task_struct *t = tk->tsk;
212 short addr_lsb = tk->size_shift;
872e9a20 213 int ret = 0;
6a46079c 214
03151c6e 215 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
872e9a20 216 pfn, t->comm, t->pid);
7329bbeb 217
872e9a20 218 if (flags & MF_ACTION_REQUIRED) {
03151c6e
NH
219 WARN_ON_ONCE(t != current);
220 ret = force_sig_mceerr(BUS_MCEERR_AR,
872e9a20 221 (void __user *)tk->addr, addr_lsb);
7329bbeb
TL
222 } else {
223 /*
224 * Don't use force here, it's convenient if the signal
225 * can be temporarily blocked.
226 * This could cause a loop when the user sets SIGBUS
227 * to SIG_IGN, but hopefully no one will do that?
228 */
ae1139ec 229 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
c0f45555 230 addr_lsb, t); /* synchronous? */
7329bbeb 231 }
6a46079c 232 if (ret < 0)
495367c0 233 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
1170532b 234 t->comm, t->pid, ret);
6a46079c
AK
235 return ret;
236}
237
/*
 * When an unknown page type is encountered, drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can handle.
 */
facb6011 242void shake_page(struct page *p, int access)
588f9ce6 243{
8bcb74de
NH
244 if (PageHuge(p))
245 return;
246
588f9ce6
AK
247 if (!PageSlab(p)) {
248 lru_add_drain_all();
249 if (PageLRU(p))
250 return;
c0554329 251 drain_all_pages(page_zone(p));
588f9ce6
AK
252 if (PageLRU(p) || is_free_buddy_page(p))
253 return;
254 }
facb6011 255
/*
 * Only drop slab caches here (which would also shrink
 * other caches) if access is not potentially fatal.
 */
cb731d6c
VD
260 if (access)
261 drop_slab_node(page_to_nid(p));
588f9ce6
AK
262}
263EXPORT_SYMBOL_GPL(shake_page);
264
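/*
 * Walk the page tables of @vma at the address of @page and return the level
 * of the devmap mapping found there (PUD_SHIFT, PMD_SHIFT or PAGE_SHIFT),
 * or 0 if the page is not currently mapped in @vma. The result is used as
 * tk->size_shift and hence as the si_addr_lsb hint in the SIGBUS siginfo.
 */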
265static unsigned long dev_pagemap_mapping_shift(struct page *page,
266 struct vm_area_struct *vma)
267{
268 unsigned long address = vma_address(page, vma);
269 pgd_t *pgd;
270 p4d_t *p4d;
271 pud_t *pud;
272 pmd_t *pmd;
273 pte_t *pte;
274
275 pgd = pgd_offset(vma->vm_mm, address);
276 if (!pgd_present(*pgd))
277 return 0;
278 p4d = p4d_offset(pgd, address);
279 if (!p4d_present(*p4d))
280 return 0;
281 pud = pud_offset(p4d, address);
282 if (!pud_present(*pud))
283 return 0;
284 if (pud_devmap(*pud))
285 return PUD_SHIFT;
286 pmd = pmd_offset(pud, address);
287 if (!pmd_present(*pmd))
288 return 0;
289 if (pmd_devmap(*pmd))
290 return PMD_SHIFT;
291 pte = pte_offset_map(pmd, address);
292 if (!pte_present(*pte))
293 return 0;
294 if (pte_devmap(*pte))
295 return PAGE_SHIFT;
296 return 0;
297}
6a46079c
AK
298
299/*
300 * Failure handling: if we can't find or can't kill a process there's
301 * not much we can do. We just print a message and ignore otherwise.
302 */
303
304/*
305 * Schedule a process for later kill.
306 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
6a46079c
AK
307 */
308static void add_to_kill(struct task_struct *tsk, struct page *p,
309 struct vm_area_struct *vma,
996ff7a0 310 struct list_head *to_kill)
6a46079c
AK
311{
312 struct to_kill *tk;
313
996ff7a0
JC
314 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
315 if (!tk) {
316 pr_err("Memory failure: Out of memory while machine check handling\n");
317 return;
6a46079c 318 }
996ff7a0 319
6a46079c 320 tk->addr = page_address_in_vma(p, vma);
6100e34b
DW
321 if (is_zone_device_page(p))
322 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
323 else
75068518 324 tk->size_shift = page_shift(compound_head(p));
6a46079c
AK
325
/*
 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
 * "tk->size_shift == 0" effectively checks that there is no mapping
 * on ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
 * into a process' address space, it's possible that not all N VMAs
 * contain mappings for the page, but at least one VMA does.
 * Only deliver SIGBUS with a payload derived from the VMA that
 * has a mapping for the page.
 */
3d7fed4a 336 if (tk->addr == -EFAULT) {
495367c0 337 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
6a46079c 338 page_to_pfn(p), tsk->comm);
3d7fed4a
JC
339 } else if (tk->size_shift == 0) {
340 kfree(tk);
341 return;
6a46079c 342 }
996ff7a0 343
6a46079c
AK
344 get_task_struct(tsk);
345 tk->tsk = tsk;
346 list_add_tail(&tk->nd, to_kill);
347}
348
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
ae1139ec
DW
357static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
358 unsigned long pfn, int flags)
6a46079c
AK
359{
360 struct to_kill *tk, *next;
361
362 list_for_each_entry_safe (tk, next, to_kill, nd) {
6751ed65 363 if (forcekill) {
6a46079c 364 /*
af901ca1 365 * In case something went wrong with munmapping
6a46079c
AK
366 * make sure the process doesn't catch the
367 * signal and then access the memory. Just kill it.
6a46079c 368 */
3d7fed4a 369 if (fail || tk->addr == -EFAULT) {
495367c0 370 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
1170532b 371 pfn, tk->tsk->comm, tk->tsk->pid);
6376360e
NH
372 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
373 tk->tsk, PIDTYPE_PID);
6a46079c
AK
374 }
375
376 /*
377 * In theory the process could have mapped
378 * something else on the address in-between. We could
379 * check for that, but we need to tell the
380 * process anyways.
381 */
ae1139ec 382 else if (kill_proc(tk, pfn, flags) < 0)
495367c0 383 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
1170532b 384 pfn, tk->tsk->comm, tk->tsk->pid);
6a46079c
AK
385 }
386 put_task_struct(tk->tsk);
387 kfree(tk);
388 }
389}
390
3ba08129
NH
391/*
392 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
393 * on behalf of the thread group. Return task_struct of the (first found)
394 * dedicated thread if found, and return NULL otherwise.
395 *
396 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
397 * have to call rcu_read_lock/unlock() in this function.
398 */
399static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
6a46079c 400{
3ba08129
NH
401 struct task_struct *t;
402
4e018b45
NH
403 for_each_thread(tsk, t) {
404 if (t->flags & PF_MCE_PROCESS) {
405 if (t->flags & PF_MCE_EARLY)
406 return t;
407 } else {
408 if (sysctl_memory_failure_early_kill)
409 return t;
410 }
411 }
3ba08129
NH
412 return NULL;
413}
414
/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill," and otherwise return NULL.
 *
 * Note that the above is true for the Action Optional case, but not for the
 * Action Required case, where SIGBUS should be sent only to the current thread.
 */
424static struct task_struct *task_early_kill(struct task_struct *tsk,
425 int force_early)
426{
6a46079c 427 if (!tsk->mm)
3ba08129 428 return NULL;
03151c6e
NH
429 if (force_early) {
430 /*
431 * Comparing ->mm here because current task might represent
432 * a subthread, while tsk always points to the main thread.
433 */
434 if (tsk->mm == current->mm)
435 return current;
436 else
437 return NULL;
438 }
4e018b45 439 return find_early_kill_thread(tsk);
6a46079c
AK
440}
441
442/*
443 * Collect processes when the error hit an anonymous page.
444 */
445static void collect_procs_anon(struct page *page, struct list_head *to_kill,
996ff7a0 446 int force_early)
6a46079c
AK
447{
448 struct vm_area_struct *vma;
449 struct task_struct *tsk;
450 struct anon_vma *av;
bf181b9f 451 pgoff_t pgoff;
6a46079c 452
4fc3f1d6 453 av = page_lock_anon_vma_read(page);
6a46079c 454 if (av == NULL) /* Not actually mapped anymore */
9b679320
PZ
455 return;
456
a0f7a756 457 pgoff = page_to_pgoff(page);
9b679320 458 read_lock(&tasklist_lock);
6a46079c 459 for_each_process (tsk) {
5beb4930 460 struct anon_vma_chain *vmac;
3ba08129 461 struct task_struct *t = task_early_kill(tsk, force_early);
5beb4930 462
3ba08129 463 if (!t)
6a46079c 464 continue;
bf181b9f
ML
465 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
466 pgoff, pgoff) {
5beb4930 467 vma = vmac->vma;
6a46079c
AK
468 if (!page_mapped_in_vma(page, vma))
469 continue;
3ba08129 470 if (vma->vm_mm == t->mm)
996ff7a0 471 add_to_kill(t, page, vma, to_kill);
6a46079c
AK
472 }
473 }
6a46079c 474 read_unlock(&tasklist_lock);
4fc3f1d6 475 page_unlock_anon_vma_read(av);
6a46079c
AK
476}
477
478/*
479 * Collect processes when the error hit a file mapped page.
480 */
481static void collect_procs_file(struct page *page, struct list_head *to_kill,
996ff7a0 482 int force_early)
6a46079c
AK
483{
484 struct vm_area_struct *vma;
485 struct task_struct *tsk;
6a46079c
AK
486 struct address_space *mapping = page->mapping;
487
d28eb9c8 488 i_mmap_lock_read(mapping);
9b679320 489 read_lock(&tasklist_lock);
6a46079c 490 for_each_process(tsk) {
a0f7a756 491 pgoff_t pgoff = page_to_pgoff(page);
3ba08129 492 struct task_struct *t = task_early_kill(tsk, force_early);
6a46079c 493
3ba08129 494 if (!t)
6a46079c 495 continue;
6b2dbba8 496 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
6a46079c
AK
497 pgoff) {
/*
 * Send an early kill signal to tasks where a vma covers
 * the page but the corrupted page is not necessarily
 * mapped in its pte.
 * Assume applications that requested early kill want
 * to be informed of all such data corruptions.
 */
3ba08129 505 if (vma->vm_mm == t->mm)
996ff7a0 506 add_to_kill(t, page, vma, to_kill);
6a46079c
AK
507 }
508 }
6a46079c 509 read_unlock(&tasklist_lock);
d28eb9c8 510 i_mmap_unlock_read(mapping);
6a46079c
AK
511}
512
513/*
514 * Collect the processes who have the corrupted page mapped to kill.
6a46079c 515 */
74614de1
TL
516static void collect_procs(struct page *page, struct list_head *tokill,
517 int force_early)
6a46079c 518{
6a46079c
AK
519 if (!page->mapping)
520 return;
521
6a46079c 522 if (PageAnon(page))
996ff7a0 523 collect_procs_anon(page, tokill, force_early);
6a46079c 524 else
996ff7a0 525 collect_procs_file(page, tokill, force_early);
6a46079c
AK
526}
527
6a46079c 528static const char *action_name[] = {
cc637b17
XX
529 [MF_IGNORED] = "Ignored",
530 [MF_FAILED] = "Failed",
531 [MF_DELAYED] = "Delayed",
532 [MF_RECOVERED] = "Recovered",
64d37a2b
NH
533};
534
535static const char * const action_page_types[] = {
cc637b17
XX
536 [MF_MSG_KERNEL] = "reserved kernel page",
537 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
538 [MF_MSG_SLAB] = "kernel slab page",
539 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
540 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
541 [MF_MSG_HUGE] = "huge page",
542 [MF_MSG_FREE_HUGE] = "free huge page",
31286a84 543 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
cc637b17
XX
544 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
545 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
546 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
547 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
548 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
549 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
550 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
551 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
552 [MF_MSG_CLEAN_LRU] = "clean LRU page",
553 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
554 [MF_MSG_BUDDY] = "free buddy page",
555 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
6100e34b 556 [MF_MSG_DAX] = "dax page",
cc637b17 557 [MF_MSG_UNKNOWN] = "unknown page",
64d37a2b
NH
558};
559
/*
 * XXX: It is possible that a page is isolated from the LRU cache,
 * and then kept in the swap cache or fails to be removed from the page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
566static int delete_from_lru_cache(struct page *p)
567{
568 if (!isolate_lru_page(p)) {
569 /*
570 * Clear sensible page flags, so that the buddy system won't
571 * complain when the page is unpoison-and-freed.
572 */
573 ClearPageActive(p);
574 ClearPageUnevictable(p);
18365225
MH
575
576 /*
577 * Poisoned page might never drop its ref count to 0 so we have
578 * to uncharge it manually from its memcg.
579 */
580 mem_cgroup_uncharge(p);
581
dc2a1cbf
WF
582 /*
583 * drop the page count elevated by isolate_lru_page()
584 */
09cbfeaf 585 put_page(p);
dc2a1cbf
WF
586 return 0;
587 }
588 return -EIO;
589}
590
78bb9203
NH
591static int truncate_error_page(struct page *p, unsigned long pfn,
592 struct address_space *mapping)
593{
594 int ret = MF_FAILED;
595
596 if (mapping->a_ops->error_remove_page) {
597 int err = mapping->a_ops->error_remove_page(mapping, p);
598
599 if (err != 0) {
600 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
601 pfn, err);
602 } else if (page_has_private(p) &&
603 !try_to_release_page(p, GFP_NOIO)) {
604 pr_info("Memory failure: %#lx: failed to release buffers\n",
605 pfn);
606 } else {
607 ret = MF_RECOVERED;
608 }
609 } else {
610 /*
611 * If the file system doesn't support it just invalidate
612 * This fails on dirty or anything with private pages
613 */
614 if (invalidate_inode_page(p))
615 ret = MF_RECOVERED;
616 else
617 pr_info("Memory failure: %#lx: Failed to invalidate\n",
618 pfn);
619 }
620
621 return ret;
622}
623
6a46079c
AK
624/*
625 * Error hit kernel page.
626 * Do nothing, try to be lucky and not touch this instead. For a few cases we
627 * could be more sophisticated.
628 */
629static int me_kernel(struct page *p, unsigned long pfn)
6a46079c 630{
cc637b17 631 return MF_IGNORED;
6a46079c
AK
632}
633
634/*
635 * Page in unknown state. Do nothing.
636 */
637static int me_unknown(struct page *p, unsigned long pfn)
638{
495367c0 639 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
cc637b17 640 return MF_FAILED;
6a46079c
AK
641}
642
6a46079c
AK
643/*
644 * Clean (or cleaned) page cache page.
645 */
646static int me_pagecache_clean(struct page *p, unsigned long pfn)
647{
6a46079c
AK
648 struct address_space *mapping;
649
dc2a1cbf
WF
650 delete_from_lru_cache(p);
651
6a46079c
AK
/*
 * For anonymous pages we're done; the only reference left
 * should be the one memory_failure() holds.
 */
656 if (PageAnon(p))
cc637b17 657 return MF_RECOVERED;
6a46079c
AK
658
659 /*
660 * Now truncate the page in the page cache. This is really
661 * more like a "temporary hole punch"
662 * Don't do this for block devices when someone else
663 * has a reference, because it could be file system metadata
664 * and that's not safe to truncate.
665 */
666 mapping = page_mapping(p);
667 if (!mapping) {
/*
 * Page has been torn down in the meantime.
 */
cc637b17 671 return MF_FAILED;
6a46079c
AK
672 }
673
674 /*
675 * Truncation is a bit tricky. Enable it per file system for now.
676 *
677 * Open: to take i_mutex or not for this? Right now we don't.
678 */
78bb9203 679 return truncate_error_page(p, pfn, mapping);
6a46079c
AK
680}
681
/*
 * Dirty pagecache page.
 * Issues: when the error hits a hole page, the error is not properly
 * propagated.
 */
687static int me_pagecache_dirty(struct page *p, unsigned long pfn)
688{
689 struct address_space *mapping = page_mapping(p);
690
691 SetPageError(p);
692 /* TBD: print more information about the file. */
693 if (mapping) {
694 /*
695 * IO error will be reported by write(), fsync(), etc.
696 * who check the mapping.
697 * This way the application knows that something went
698 * wrong with its dirty file data.
699 *
700 * There's one open issue:
701 *
702 * The EIO will be only reported on the next IO
703 * operation and then cleared through the IO map.
 * Normally Linux has two mechanisms to pass IO errors:
 * first through the AS_EIO flag in the address space
 * and then through the PageError flag in the page.
 * Since we drop pages on memory failure handling, the
 * only mechanism open to us is AS_EIO.
 *
 * This has the disadvantage that it gets cleared on
 * the first operation that returns an error, while
 * the PageError bit is more sticky and only cleared
 * when the page is reread or dropped. If an
 * application assumes it will always get an error on
 * fsync, but does other operations on the fd before,
 * and the page is dropped in between, then the error
 * will not be properly reported.
718 *
719 * This can already happen even without hwpoisoned
720 * pages: first on metadata IO errors (which only
721 * report through AS_EIO) or when the page is dropped
722 * at the wrong time.
723 *
724 * So right now we assume that the application DTRT on
725 * the first EIO, but we're not worse than other parts
726 * of the kernel.
727 */
af21bfaf 728 mapping_set_error(mapping, -EIO);
6a46079c
AK
729 }
730
731 return me_pagecache_clean(p, pfn);
732}
733
734/*
735 * Clean and dirty swap cache.
736 *
737 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
739 * referenced concurrently by 2 types of PTEs:
740 * normal PTEs and swap PTEs. We try to handle them consistently by calling
741 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
742 * and then
743 * - clear dirty bit to prevent IO
744 * - remove from LRU
745 * - but keep in the swap cache, so that when we return to it on
746 * a later page fault, we know the application is accessing
747 * corrupted data and shall be killed (we installed simple
748 * interception code in do_swap_page to catch it).
749 *
750 * Clean swap cache pages can be directly isolated. A later page fault will
751 * bring in the known good data from disk.
752 */
753static int me_swapcache_dirty(struct page *p, unsigned long pfn)
754{
6a46079c
AK
755 ClearPageDirty(p);
756 /* Trigger EIO in shmem: */
757 ClearPageUptodate(p);
758
dc2a1cbf 759 if (!delete_from_lru_cache(p))
cc637b17 760 return MF_DELAYED;
dc2a1cbf 761 else
cc637b17 762 return MF_FAILED;
6a46079c
AK
763}
764
765static int me_swapcache_clean(struct page *p, unsigned long pfn)
766{
6a46079c 767 delete_from_swap_cache(p);
e43c3afb 768
dc2a1cbf 769 if (!delete_from_lru_cache(p))
cc637b17 770 return MF_RECOVERED;
dc2a1cbf 771 else
cc637b17 772 return MF_FAILED;
6a46079c
AK
773}
774
775/*
776 * Huge pages. Needs work.
777 * Issues:
93f70f90
NH
778 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
779 * To narrow down kill region to one page, we need to break up pmd.
6a46079c
AK
780 */
781static int me_huge_page(struct page *p, unsigned long pfn)
782{
6de2b1aa 783 int res = 0;
93f70f90 784 struct page *hpage = compound_head(p);
78bb9203 785 struct address_space *mapping;
2491ffee
NH
786
787 if (!PageHuge(hpage))
788 return MF_DELAYED;
789
78bb9203
NH
790 mapping = page_mapping(hpage);
791 if (mapping) {
792 res = truncate_error_page(hpage, pfn, mapping);
793 } else {
794 unlock_page(hpage);
795 /*
796 * migration entry prevents later access on error anonymous
797 * hugepage, so we can free and dissolve it into buddy to
798 * save healthy subpages.
799 */
800 if (PageAnon(hpage))
801 put_page(hpage);
802 dissolve_free_huge_page(p);
803 res = MF_RECOVERED;
804 lock_page(hpage);
93f70f90 805 }
78bb9203
NH
806
807 return res;
6a46079c
AK
808}
809
810/*
811 * Various page states we can handle.
812 *
813 * A page state is defined by its current page->flags bits.
814 * The table matches them in order and calls the right handler.
815 *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
6a46079c
AK
818 *
819 * This is not complete. More states could be added.
820 * For any missing state don't attempt recovery.
821 */
822
823#define dirty (1UL << PG_dirty)
6326fec1 824#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
6a46079c
AK
825#define unevict (1UL << PG_unevictable)
826#define mlock (1UL << PG_mlocked)
827#define writeback (1UL << PG_writeback)
828#define lru (1UL << PG_lru)
6a46079c 829#define head (1UL << PG_head)
6a46079c 830#define slab (1UL << PG_slab)
6a46079c
AK
831#define reserved (1UL << PG_reserved)
832
833static struct page_state {
834 unsigned long mask;
835 unsigned long res;
cc637b17 836 enum mf_action_page_type type;
6a46079c
AK
837 int (*action)(struct page *p, unsigned long pfn);
838} error_states[] = {
cc637b17 839 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
95d01fc6
WF
840 /*
841 * free pages are specially detected outside this table:
842 * PG_buddy pages only make a small fraction of all free pages.
843 */
6a46079c
AK
844
845 /*
846 * Could in theory check if slab page is free or if we can drop
847 * currently unused objects without touching them. But just
848 * treat it as standard kernel for now.
849 */
cc637b17 850 { slab, slab, MF_MSG_SLAB, me_kernel },
6a46079c 851
cc637b17 852 { head, head, MF_MSG_HUGE, me_huge_page },
6a46079c 853
cc637b17
XX
854 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
855 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
6a46079c 856
cc637b17
XX
857 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
858 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
6a46079c 859
cc637b17
XX
860 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
861 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
5f4b9fc5 862
cc637b17
XX
863 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
864 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
6a46079c
AK
865
866 /*
867 * Catchall entry: must be at end.
868 */
cc637b17 869 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
6a46079c
AK
870};
871
2326c467
AK
872#undef dirty
873#undef sc
874#undef unevict
875#undef mlock
876#undef writeback
877#undef lru
2326c467 878#undef head
2326c467
AK
879#undef slab
880#undef reserved
881
ff604cf6
NH
882/*
883 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
884 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
885 */
cc3e2af4
XX
886static void action_result(unsigned long pfn, enum mf_action_page_type type,
887 enum mf_result result)
6a46079c 888{
97f0b134
XX
889 trace_memory_failure_event(pfn, type, result);
890
495367c0 891 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
64d37a2b 892 pfn, action_page_types[type], action_name[result]);
6a46079c
AK
893}
894
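/*
 * Run the recovery action for the matched page state and report the result.
 * If someone else still holds a reference to the page afterwards, downgrade
 * the result to MF_FAILED. Returns 0 for MF_RECOVERED/MF_DELAYED, -EBUSY
 * otherwise.
 */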
895static int page_action(struct page_state *ps, struct page *p,
bd1ce5f9 896 unsigned long pfn)
6a46079c
AK
897{
898 int result;
7456b040 899 int count;
6a46079c
AK
900
901 result = ps->action(p, pfn);
7456b040 902
bd1ce5f9 903 count = page_count(p) - 1;
cc637b17 904 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
138ce286 905 count--;
78bb9203 906 if (count > 0) {
495367c0 907 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
64d37a2b 908 pfn, action_page_types[ps->type], count);
cc637b17 909 result = MF_FAILED;
138ce286 910 }
64d37a2b 911 action_result(pfn, ps->type, result);
6a46079c
AK
912
913 /* Could do more checks here if page looks ok */
914 /*
915 * Could adjust zone counters here to correct for the missing page.
916 */
917
cc637b17 918 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
6a46079c
AK
919}
920
ead07f6a
NH
921/**
922 * get_hwpoison_page() - Get refcount for memory error handling:
923 * @page: raw error page (hit by memory error)
924 *
 * Return: 0 if it failed to grab the refcount, otherwise non-zero
 * (currently 1).
927 */
928int get_hwpoison_page(struct page *page)
929{
930 struct page *head = compound_head(page);
931
4e41a30c 932 if (!PageHuge(head) && PageTransHuge(head)) {
/*
 * A non-anonymous thp exists only at allocation/free time. We
 * can't handle such a case correctly, so let's give up on it.
 * This should be better than triggering a BUG_ON when the
 * kernel tries to touch the "partially handled" page.
 */
939 if (!PageAnon(head)) {
495367c0 940 pr_err("Memory failure: %#lx: non anonymous thp\n",
98ed2b00
NH
941 page_to_pfn(page));
942 return 0;
943 }
ead07f6a
NH
944 }
945
c2e7e00b
KK
946 if (get_page_unless_zero(head)) {
947 if (head == compound_head(page))
948 return 1;
949
495367c0
CY
950 pr_info("Memory failure: %#lx cannot catch tail\n",
951 page_to_pfn(page));
c2e7e00b
KK
952 put_page(head);
953 }
954
955 return 0;
ead07f6a
NH
956}
957EXPORT_SYMBOL_GPL(get_hwpoison_page);
958
6a46079c
AK
959/*
960 * Do all that is necessary to remove user space mappings. Unmap
961 * the pages and send SIGBUS to the processes if the data was dirty.
962 */
666e5a40 963static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
83b57531 964 int flags, struct page **hpagep)
6a46079c 965{
a128ca71 966 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
6a46079c
AK
967 struct address_space *mapping;
968 LIST_HEAD(tokill);
c0d0381a 969 bool unmap_success = true;
6751ed65 970 int kill = 1, forcekill;
54b9dd14 971 struct page *hpage = *hpagep;
286c469a 972 bool mlocked = PageMlocked(hpage);
6a46079c 973
93a9eb39
NH
974 /*
975 * Here we are interested only in user-mapped pages, so skip any
976 * other types of pages.
977 */
978 if (PageReserved(p) || PageSlab(p))
666e5a40 979 return true;
93a9eb39 980 if (!(PageLRU(hpage) || PageHuge(p)))
666e5a40 981 return true;
6a46079c 982
6a46079c
AK
983 /*
984 * This check implies we don't kill processes if their pages
985 * are in the swap cache early. Those are always late kills.
986 */
7af446a8 987 if (!page_mapped(hpage))
666e5a40 988 return true;
1668bfd5 989
52089b14 990 if (PageKsm(p)) {
495367c0 991 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
666e5a40 992 return false;
52089b14 993 }
6a46079c
AK
994
995 if (PageSwapCache(p)) {
495367c0
CY
996 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
997 pfn);
6a46079c
AK
998 ttu |= TTU_IGNORE_HWPOISON;
999 }
1000
1001 /*
1002 * Propagate the dirty bit from PTEs to struct page first, because we
1003 * need this to decide if we should kill or just drop the page.
db0480b3
WF
1004 * XXX: the dirty test could be racy: set_page_dirty() may not always
1005 * be called inside page lock (it's recommended but not enforced).
6a46079c 1006 */
7af446a8 1007 mapping = page_mapping(hpage);
6751ed65 1008 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
7af446a8
NH
1009 mapping_cap_writeback_dirty(mapping)) {
1010 if (page_mkclean(hpage)) {
1011 SetPageDirty(hpage);
6a46079c
AK
1012 } else {
1013 kill = 0;
1014 ttu |= TTU_IGNORE_HWPOISON;
495367c0 1015 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
6a46079c
AK
1016 pfn);
1017 }
1018 }
1019
1020 /*
1021 * First collect all the processes that have the page
1022 * mapped in dirty form. This has to be done before try_to_unmap,
1023 * because ttu takes the rmap data structures down.
1024 *
1025 * Error handling: We ignore errors here because
1026 * there's nothing that can be done.
1027 */
1028 if (kill)
415c64c1 1029 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
6a46079c 1030
c0d0381a
MK
1031 if (!PageHuge(hpage)) {
1032 unmap_success = try_to_unmap(hpage, ttu);
1033 } else {
1034 /*
1035 * For hugetlb pages, try_to_unmap could potentially call
1036 * huge_pmd_unshare. Because of this, take semaphore in
1037 * write mode here and set TTU_RMAP_LOCKED to indicate we
 * have taken the lock at this higher level.
1039 *
1040 * Note that the call to hugetlb_page_mapping_lock_write
1041 * is necessary even if mapping is already set. It handles
1042 * ugliness of potentially having to drop page lock to obtain
1043 * i_mmap_rwsem.
1044 */
1045 mapping = hugetlb_page_mapping_lock_write(hpage);
1046
1047 if (mapping) {
1048 unmap_success = try_to_unmap(hpage,
1049 ttu|TTU_RMAP_LOCKED);
1050 i_mmap_unlock_write(mapping);
1051 } else {
1052 pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
1053 pfn);
1054 unmap_success = false;
1055 }
1056 }
666e5a40 1057 if (!unmap_success)
495367c0 1058 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1170532b 1059 pfn, page_mapcount(hpage));
a6d30ddd 1060
286c469a
NH
1061 /*
1062 * try_to_unmap() might put mlocked page in lru cache, so call
1063 * shake_page() again to ensure that it's flushed.
1064 */
1065 if (mlocked)
1066 shake_page(hpage, 0);
1067
6a46079c
AK
1068 /*
1069 * Now that the dirty bit has been propagated to the
1070 * struct page and all unmaps done we can decide if
1071 * killing is needed or not. Only kill when the page
6751ed65
TL
1072 * was dirty or the process is not restartable,
1073 * otherwise the tokill list is merely
6a46079c
AK
1074 * freed. When there was a problem unmapping earlier
 * use a more forceful, uncatchable kill to prevent
1076 * any accesses to the poisoned memory.
1077 */
415c64c1 1078 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
ae1139ec 1079 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1668bfd5 1080
666e5a40 1081 return unmap_success;
6a46079c
AK
1082}
1083
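/*
 * Match the page against the error_states[] table, first with the current
 * page flags and then with the flags saved before unmapping, and run the
 * corresponding recovery action.
 */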
1084static int identify_page_state(unsigned long pfn, struct page *p,
1085 unsigned long page_flags)
761ad8d7
NH
1086{
1087 struct page_state *ps;
0348d2eb
NH
1088
1089 /*
1090 * The first check uses the current page flags which may not have any
1091 * relevant information. The second check with the saved page flags is
1092 * carried out only if the first check can't determine the page status.
1093 */
1094 for (ps = error_states;; ps++)
1095 if ((p->flags & ps->mask) == ps->res)
1096 break;
1097
1098 page_flags |= (p->flags & (1UL << PG_dirty));
1099
1100 if (!ps->mask)
1101 for (ps = error_states;; ps++)
1102 if ((page_flags & ps->mask) == ps->res)
1103 break;
1104 return page_action(ps, p, pfn);
1105}
1106
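/*
 * Hugetlb variant of memory_failure(): the poison flag lives on the head
 * page, free huge pages are dissolved into buddy pages, and mapped ones are
 * unmapped before the normal page state handling.
 */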
83b57531 1107static int memory_failure_hugetlb(unsigned long pfn, int flags)
0348d2eb 1108{
761ad8d7
NH
1109 struct page *p = pfn_to_page(pfn);
1110 struct page *head = compound_head(p);
1111 int res;
1112 unsigned long page_flags;
1113
1114 if (TestSetPageHWPoison(head)) {
1115 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1116 pfn);
1117 return 0;
1118 }
1119
1120 num_poisoned_pages_inc();
1121
1122 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1123 /*
1124 * Check "filter hit" and "race with other subpage."
1125 */
1126 lock_page(head);
1127 if (PageHWPoison(head)) {
1128 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1129 || (p != head && TestSetPageHWPoison(head))) {
1130 num_poisoned_pages_dec();
1131 unlock_page(head);
1132 return 0;
1133 }
1134 }
1135 unlock_page(head);
1136 dissolve_free_huge_page(p);
1137 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1138 return 0;
1139 }
1140
1141 lock_page(head);
1142 page_flags = head->flags;
1143
1144 if (!PageHWPoison(head)) {
1145 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1146 num_poisoned_pages_dec();
1147 unlock_page(head);
1148 put_hwpoison_page(head);
1149 return 0;
1150 }
1151
31286a84
NH
1152 /*
1153 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
 * simply disable it. In order to make it work properly, we need to
 * make sure that:
1156 * - conversion of a pud that maps an error hugetlb into hwpoison
1157 * entry properly works, and
1158 * - other mm code walking over page table is aware of pud-aligned
1159 * hwpoison entries.
1160 */
1161 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1162 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1163 res = -EBUSY;
1164 goto out;
1165 }
1166
83b57531 1167 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
761ad8d7
NH
1168 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1169 res = -EBUSY;
1170 goto out;
1171 }
1172
0348d2eb 1173 res = identify_page_state(pfn, p, page_flags);
761ad8d7
NH
1174out:
1175 unlock_page(head);
1176 return res;
1177}
1178
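/*
 * Handle a memory failure on a ZONE_DEVICE page (e.g. fsdax/device-dax).
 * Such pages are not on the LRU and do not use the page lock, so this path
 * takes the dax entry lock instead, marks the page poisoned, unmaps it and
 * signals the affected processes.
 */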
1179static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1180 struct dev_pagemap *pgmap)
1181{
1182 struct page *page = pfn_to_page(pfn);
1183 const bool unmap_success = true;
1184 unsigned long size = 0;
1185 struct to_kill *tk;
1186 LIST_HEAD(tokill);
1187 int rc = -EBUSY;
1188 loff_t start;
27359fd6 1189 dax_entry_t cookie;
6100e34b
DW
1190
1191 /*
1192 * Prevent the inode from being freed while we are interrogating
1193 * the address_space, typically this would be handled by
1194 * lock_page(), but dax pages do not use the page lock. This
1195 * also prevents changes to the mapping of this pfn until
1196 * poison signaling is complete.
1197 */
27359fd6
MW
1198 cookie = dax_lock_page(page);
1199 if (!cookie)
6100e34b
DW
1200 goto out;
1201
1202 if (hwpoison_filter(page)) {
1203 rc = 0;
1204 goto unlock;
1205 }
1206
25b2995a 1207 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
6100e34b
DW
1208 /*
1209 * TODO: Handle HMM pages which may need coordination
1210 * with device-side memory.
1211 */
1212 goto unlock;
6100e34b
DW
1213 }
1214
1215 /*
1216 * Use this flag as an indication that the dax page has been
1217 * remapped UC to prevent speculative consumption of poison.
1218 */
1219 SetPageHWPoison(page);
1220
1221 /*
1222 * Unlike System-RAM there is no possibility to swap in a
1223 * different physical page at a given virtual address, so all
1224 * userspace consumption of ZONE_DEVICE memory necessitates
1225 * SIGBUS (i.e. MF_MUST_KILL)
1226 */
1227 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1228 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1229
1230 list_for_each_entry(tk, &tokill, nd)
1231 if (tk->size_shift)
1232 size = max(size, 1UL << tk->size_shift);
1233 if (size) {
1234 /*
1235 * Unmap the largest mapping to avoid breaking up
1236 * device-dax mappings which are constant size. The
1237 * actual size of the mapping being torn down is
1238 * communicated in siginfo, see kill_proc()
1239 */
1240 start = (page->index << PAGE_SHIFT) & ~(size - 1);
1241 unmap_mapping_range(page->mapping, start, start + size, 0);
1242 }
1243 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
1244 rc = 0;
1245unlock:
27359fd6 1246 dax_unlock_page(page, cookie);
6100e34b
DW
1247out:
1248 /* drop pgmap ref acquired in caller */
1249 put_dev_pagemap(pgmap);
1250 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1251 return rc;
1252}
1253
cd42f4a3
TL
1254/**
1255 * memory_failure - Handle memory failure of a page.
1256 * @pfn: Page Number of the corrupted page
cd42f4a3
TL
1257 * @flags: fine tune action taken
1258 *
1259 * This function is called by the low level machine check code
1260 * of an architecture when it detects hardware memory corruption
1261 * of a page. It tries its best to recover, which includes
1262 * dropping pages, killing processes etc.
1263 *
1264 * The function is primarily of use for corruptions that
1265 * happen outside the current execution context (e.g. when
1266 * detected by a background scrubber)
1267 *
1268 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
1270 */
83b57531 1271int memory_failure(unsigned long pfn, int flags)
6a46079c 1272{
6a46079c 1273 struct page *p;
7af446a8 1274 struct page *hpage;
415c64c1 1275 struct page *orig_head;
6100e34b 1276 struct dev_pagemap *pgmap;
6a46079c 1277 int res;
524fca1e 1278 unsigned long page_flags;
6a46079c
AK
1279
1280 if (!sysctl_memory_failure_recovery)
83b57531 1281 panic("Memory failure on page %lx", pfn);
6a46079c 1282
96c804a6
DH
1283 p = pfn_to_online_page(pfn);
1284 if (!p) {
1285 if (pfn_valid(pfn)) {
1286 pgmap = get_dev_pagemap(pfn, NULL);
1287 if (pgmap)
1288 return memory_failure_dev_pagemap(pfn, flags,
1289 pgmap);
1290 }
495367c0
CY
1291 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1292 pfn);
a7560fc8 1293 return -ENXIO;
6a46079c
AK
1294 }
1295
761ad8d7 1296 if (PageHuge(p))
83b57531 1297 return memory_failure_hugetlb(pfn, flags);
6a46079c 1298 if (TestSetPageHWPoison(p)) {
495367c0
CY
1299 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1300 pfn);
6a46079c
AK
1301 return 0;
1302 }
1303
761ad8d7 1304 orig_head = hpage = compound_head(p);
b37ff71c 1305 num_poisoned_pages_inc();
6a46079c
AK
1306
1307 /*
1308 * We need/can do nothing about count=0 pages.
1309 * 1) it's a free page, and therefore in safe hand:
1310 * prep_new_page() will be the gate keeper.
761ad8d7 1311 * 2) it's part of a non-compound high order page.
6a46079c
AK
1312 * Implies some kernel user: cannot stop them from
1313 * R/W the page; let's pray that the page has been
1314 * used and will be freed some time later.
1315 * In fact it's dangerous to directly bump up page count from 0,
1c4c3b99 1316 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
6a46079c 1317 */
ead07f6a 1318 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
8d22ba1b 1319 if (is_free_buddy_page(p)) {
cc637b17 1320 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
8d22ba1b
WF
1321 return 0;
1322 } else {
cc637b17 1323 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
8d22ba1b
WF
1324 return -EBUSY;
1325 }
6a46079c
AK
1326 }
1327
761ad8d7 1328 if (PageTransHuge(hpage)) {
c3901e72
NH
1329 lock_page(p);
1330 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1331 unlock_page(p);
1332 if (!PageAnon(p))
495367c0
CY
1333 pr_err("Memory failure: %#lx: non anonymous thp\n",
1334 pfn);
7f6bf39b 1335 else
495367c0
CY
1336 pr_err("Memory failure: %#lx: thp split failed\n",
1337 pfn);
ead07f6a 1338 if (TestClearPageHWPoison(p))
b37ff71c 1339 num_poisoned_pages_dec();
665d9da7 1340 put_hwpoison_page(p);
415c64c1
NH
1341 return -EBUSY;
1342 }
c3901e72 1343 unlock_page(p);
415c64c1
NH
1344 VM_BUG_ON_PAGE(!page_count(p), p);
1345 hpage = compound_head(p);
1346 }
1347
e43c3afb
WF
1348 /*
1349 * We ignore non-LRU pages for good reasons.
1350 * - PG_locked is only well defined for LRU pages and a few others
48c935ad 1351 * - to avoid races with __SetPageLocked()
e43c3afb
WF
1352 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1353 * The check (unnecessarily) ignores LRU pages being isolated and
1354 * walked by the page reclaim code, however that's not a big loss.
1355 */
8bcb74de
NH
1356 shake_page(p, 0);
1357 /* shake_page could have turned it free. */
1358 if (!PageLRU(p) && is_free_buddy_page(p)) {
1359 if (flags & MF_COUNT_INCREASED)
1360 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1361 else
1362 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1363 return 0;
e43c3afb 1364 }
e43c3afb 1365
761ad8d7 1366 lock_page(p);
847ce401 1367
f37d4298
AK
1368 /*
1369 * The page could have changed compound pages during the locking.
1370 * If this happens just bail out.
1371 */
415c64c1 1372 if (PageCompound(p) && compound_head(p) != orig_head) {
cc637b17 1373 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
f37d4298
AK
1374 res = -EBUSY;
1375 goto out;
1376 }
1377
524fca1e
NH
1378 /*
1379 * We use page flags to determine what action should be taken, but
1380 * the flags can be modified by the error containment action. One
1381 * example is an mlocked page, where PG_mlocked is cleared by
1382 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1383 * correctly, we save a copy of the page flags at this time.
1384 */
7258ae5c
JM
1385 if (PageHuge(p))
1386 page_flags = hpage->flags;
1387 else
1388 page_flags = p->flags;
524fca1e 1389
847ce401
WF
1390 /*
 * unpoison always clears PG_hwpoison inside the page lock
1392 */
1393 if (!PageHWPoison(p)) {
495367c0 1394 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
b37ff71c 1395 num_poisoned_pages_dec();
761ad8d7
NH
1396 unlock_page(p);
1397 put_hwpoison_page(p);
a09233f3 1398 return 0;
847ce401 1399 }
7c116f2b
WF
1400 if (hwpoison_filter(p)) {
1401 if (TestClearPageHWPoison(p))
b37ff71c 1402 num_poisoned_pages_dec();
761ad8d7
NH
1403 unlock_page(p);
1404 put_hwpoison_page(p);
7c116f2b
WF
1405 return 0;
1406 }
847ce401 1407
761ad8d7 1408 if (!PageTransTail(p) && !PageLRU(p))
0bc1f8b0
CY
1409 goto identify_page_state;
1410
6edd6cc6
NH
1411 /*
1412 * It's very difficult to mess with pages currently under IO
1413 * and in many cases impossible, so we just avoid it here.
1414 */
6a46079c
AK
1415 wait_on_page_writeback(p);
1416
1417 /*
1418 * Now take care of user space mappings.
e64a782f 1419 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
54b9dd14
NH
1420 *
 * When the raw error page is a thp tail page, hpage points to the raw
1422 * page after thp split.
6a46079c 1423 */
83b57531 1424 if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
cc637b17 1425 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1668bfd5
WF
1426 res = -EBUSY;
1427 goto out;
1428 }
6a46079c
AK
1429
1430 /*
1431 * Torn down by someone else?
1432 */
dc2a1cbf 1433 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
cc637b17 1434 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
d95ea51e 1435 res = -EBUSY;
6a46079c
AK
1436 goto out;
1437 }
1438
0bc1f8b0 1439identify_page_state:
0348d2eb 1440 res = identify_page_state(pfn, p, page_flags);
6a46079c 1441out:
761ad8d7 1442 unlock_page(p);
6a46079c
AK
1443 return res;
1444}
cd42f4a3 1445EXPORT_SYMBOL_GPL(memory_failure);
847ce401 1446
ea8f5fb8
HY
1447#define MEMORY_FAILURE_FIFO_ORDER 4
1448#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1449
1450struct memory_failure_entry {
1451 unsigned long pfn;
ea8f5fb8
HY
1452 int flags;
1453};
1454
1455struct memory_failure_cpu {
1456 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1457 MEMORY_FAILURE_FIFO_SIZE);
1458 spinlock_t lock;
1459 struct work_struct work;
1460};
1461
1462static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1463
1464/**
1465 * memory_failure_queue - Schedule handling memory failure of a page.
1466 * @pfn: Page Number of the corrupted page
ea8f5fb8
HY
1467 * @flags: Flags for memory failure handling
1468 *
1469 * This function is called by the low level hardware error handler
1470 * when it detects hardware memory corruption of a page. It schedules
1471 * the recovering of error page, including dropping pages, killing
1472 * processes etc.
1473 *
1474 * The function is primarily of use for corruptions that
1475 * happen outside the current execution context (e.g. when
1476 * detected by a background scrubber)
1477 *
1478 * Can run in IRQ context.
1479 */
83b57531 1480void memory_failure_queue(unsigned long pfn, int flags)
ea8f5fb8
HY
1481{
1482 struct memory_failure_cpu *mf_cpu;
1483 unsigned long proc_flags;
1484 struct memory_failure_entry entry = {
1485 .pfn = pfn,
ea8f5fb8
HY
1486 .flags = flags,
1487 };
1488
1489 mf_cpu = &get_cpu_var(memory_failure_cpu);
1490 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
498d319b 1491 if (kfifo_put(&mf_cpu->fifo, entry))
ea8f5fb8
HY
1492 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1493 else
8e33a52f 1494 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
ea8f5fb8
HY
1495 pfn);
1496 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1497 put_cpu_var(memory_failure_cpu);
1498}
1499EXPORT_SYMBOL_GPL(memory_failure_queue);
1500
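/*
 * Per-CPU worker: drain the entries that memory_failure_queue() put into the
 * kfifo (possibly from IRQ context) and handle each of them in process
 * context via memory_failure() or soft_offline_page().
 */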
1501static void memory_failure_work_func(struct work_struct *work)
1502{
1503 struct memory_failure_cpu *mf_cpu;
1504 struct memory_failure_entry entry = { 0, };
1505 unsigned long proc_flags;
1506 int gotten;
1507
06202231 1508 mf_cpu = container_of(work, struct memory_failure_cpu, work);
ea8f5fb8
HY
1509 for (;;) {
1510 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1511 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1512 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1513 if (!gotten)
1514 break;
cf870c70 1515 if (entry.flags & MF_SOFT_OFFLINE)
feec24a6 1516 soft_offline_page(entry.pfn, entry.flags);
cf870c70 1517 else
83b57531 1518 memory_failure(entry.pfn, entry.flags);
ea8f5fb8
HY
1519 }
1520}
1521
06202231
JM
1522/*
1523 * Process memory_failure work queued on the specified CPU.
1524 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
1525 */
1526void memory_failure_queue_kick(int cpu)
1527{
1528 struct memory_failure_cpu *mf_cpu;
1529
1530 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1531 cancel_work_sync(&mf_cpu->work);
1532 memory_failure_work_func(&mf_cpu->work);
1533}
1534
ea8f5fb8
HY
1535static int __init memory_failure_init(void)
1536{
1537 struct memory_failure_cpu *mf_cpu;
1538 int cpu;
1539
1540 for_each_possible_cpu(cpu) {
1541 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1542 spin_lock_init(&mf_cpu->lock);
1543 INIT_KFIFO(mf_cpu->fifo);
1544 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1545 }
1546
1547 return 0;
1548}
1549core_initcall(memory_failure_init);
1550
a5f65109
NH
1551#define unpoison_pr_info(fmt, pfn, rs) \
1552({ \
1553 if (__ratelimit(rs)) \
1554 pr_info(fmt, pfn); \
1555})
1556
847ce401
WF
1557/**
1558 * unpoison_memory - Unpoison a previously poisoned page
1559 * @pfn: Page number of the to be unpoisoned page
1560 *
1561 * Software-unpoison a page that has been poisoned by
1562 * memory_failure() earlier.
1563 *
1564 * This is only done on the software-level, so it only works
1565 * for linux injected failures, not real hardware failures
1566 *
1567 * Returns 0 for success, otherwise -errno.
1568 */
1569int unpoison_memory(unsigned long pfn)
1570{
1571 struct page *page;
1572 struct page *p;
1573 int freeit = 0;
a5f65109
NH
1574 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1575 DEFAULT_RATELIMIT_BURST);
847ce401
WF
1576
1577 if (!pfn_valid(pfn))
1578 return -ENXIO;
1579
1580 p = pfn_to_page(pfn);
1581 page = compound_head(p);
1582
1583 if (!PageHWPoison(p)) {
495367c0 1584 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
a5f65109 1585 pfn, &unpoison_rs);
847ce401
WF
1586 return 0;
1587 }
1588
230ac719 1589 if (page_count(page) > 1) {
495367c0 1590 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
a5f65109 1591 pfn, &unpoison_rs);
230ac719
NH
1592 return 0;
1593 }
1594
1595 if (page_mapped(page)) {
495367c0 1596 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
a5f65109 1597 pfn, &unpoison_rs);
230ac719
NH
1598 return 0;
1599 }
1600
1601 if (page_mapping(page)) {
495367c0 1602 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
a5f65109 1603 pfn, &unpoison_rs);
230ac719
NH
1604 return 0;
1605 }
1606
0cea3fdc
WL
1607 /*
 * unpoison_memory() can encounter a thp only when the thp is being
 * worked on by memory_failure() and the page lock is not held yet.
 * In such a case, we yield to memory_failure() and make unpoison fail.
1611 */
e76d30e2 1612 if (!PageHuge(page) && PageTransHuge(page)) {
495367c0 1613 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
a5f65109 1614 pfn, &unpoison_rs);
ead07f6a 1615 return 0;
0cea3fdc
WL
1616 }
1617
ead07f6a 1618 if (!get_hwpoison_page(p)) {
847ce401 1619 if (TestClearPageHWPoison(p))
8e30456b 1620 num_poisoned_pages_dec();
495367c0 1621 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
a5f65109 1622 pfn, &unpoison_rs);
847ce401
WF
1623 return 0;
1624 }
1625
7eaceacc 1626 lock_page(page);
847ce401
WF
1627 /*
1628 * This test is racy because PG_hwpoison is set outside of page lock.
1629 * That's acceptable because that won't trigger kernel panic. Instead,
1630 * the PG_hwpoison page will be caught and isolated on the entrance to
1631 * the free buddy page pool.
1632 */
c9fbdd5f 1633 if (TestClearPageHWPoison(page)) {
495367c0 1634 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
a5f65109 1635 pfn, &unpoison_rs);
b37ff71c 1636 num_poisoned_pages_dec();
847ce401
WF
1637 freeit = 1;
1638 }
1639 unlock_page(page);
1640
665d9da7 1641 put_hwpoison_page(page);
3ba5eebc 1642 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
665d9da7 1643 put_hwpoison_page(page);
847ce401
WF
1644
1645 return 0;
1646}
1647EXPORT_SYMBOL(unpoison_memory);
facb6011 1648
666feb21 1649static struct page *new_page(struct page *p, unsigned long private)
facb6011 1650{
12686d15 1651 int nid = page_to_nid(p);
94310cbc 1652
ef77ba5c 1653 return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
facb6011
AK
1654}

/*
 * Safely take a reference on an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero-refcount page that is not
 * free, and 1 for any other page type. Only in the latter case is the
 * page returned with its refcount raised.
 */
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * When the target page is a free hugepage, just remove it
	 * from the free hugepage list.
	 */
	if (!get_hwpoison_page(p)) {
		if (PageHuge(p)) {
			pr_info("%s: %#lx free huge page\n", __func__, pfn);
			ret = 0;
		} else if (is_free_buddy_page(p)) {
			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
			ret = 0;
		} else {
			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
				__func__, pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	return ret;
}

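/*
 * Like __get_any_page(), but if the page is neither on the LRU nor
 * movable, drop the reference, shake the page so it gets a chance to be
 * released to the buddy allocator, and check once more whether it has
 * become free.
 */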
static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
	int ret = __get_any_page(page, pfn, flags);

	if (ret == 1 && !PageHuge(page) &&
	    !PageLRU(page) && !__PageMovable(page)) {
		/*
		 * Try to free it.
		 */
		put_hwpoison_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = __get_any_page(page, pfn, 0);
		if (ret == 1 && !PageLRU(page)) {
			/* Drop the page reference taken by __get_any_page() */
			put_hwpoison_page(page);
			pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
				pfn, page->flags, &page->flags);
			return -EIO;
		}
	}
	return ret;
}

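/*
 * Soft offline an in-use hugetlb page: isolate it, migrate its contents
 * away, then dissolve the now-free source hugepage and mark the
 * underlying buddy page hwpoisoned so it stays out of circulation.
 */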
static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	/*
	 * This double-check of PageHWPoison is to avoid the race with
	 * memory_failure(). See also comment in __soft_offline_page().
	 */
	lock_page(hpage);
	if (PageHWPoison(hpage)) {
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}
	unlock_page(hpage);

	ret = isolate_huge_page(hpage, &pagelist);
	/*
	 * get_any_page() and isolate_huge_page() take a refcount each,
	 * so we need to drop one here.
	 */
	put_hwpoison_page(hpage);
	if (!ret) {
		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
		return -EBUSY;
	}

	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
				MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret) {
		pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
			pfn, ret, page->flags, &page->flags);
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
		if (ret > 0)
			ret = -EIO;
	} else {
		/*
		 * Set PG_hwpoison only when the migration source hugepage
		 * was successfully dissolved; otherwise the hwpoisoned
		 * hugepage would remain on the free hugepage list and a
		 * later allocation of it would surprise userspace with
		 * SIGBUS, which is not expected from soft offlining.
		 */
		ret = dissolve_free_huge_page(page);
		if (!ret) {
			if (set_hwpoison_free_buddy_page(page))
				num_poisoned_pages_inc();
			else
				ret = -EBUSY;
		}
	}
	return ret;
}

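/*
 * Soft offline an in-use base page: try to drop it from the page cache
 * via invalidate_inode_page() first, and fall back to isolating and
 * migrating it if that fails.
 */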
static int __soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Check PageHWPoison again inside the page lock because PageHWPoison
	 * is set by memory_failure() outside the page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside the page
	 * lock, so there's no race between soft_offline_page() and
	 * memory_failure().
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_hwpoison_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}
	/*
	 * Try to invalidate first. This should work for
	 * non-dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);
	/*
	 * RED-PEN: it would be better to keep the page isolated here, but
	 * we would need to fix the isolation locking first.
	 */
	if (ret == 1) {
		put_hwpoison_page(page);
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		SetPageHWPoison(page);
		num_poisoned_pages_inc();
		return 0;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	if (PageLRU(page))
		ret = isolate_lru_page(page);
	else
		ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
	/*
	 * Drop the page reference that came from get_any_page(); a
	 * successful isolate_lru_page() already took another one.
	 */
	put_hwpoison_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		/*
		 * Once an LRU page has been isolated, PageLRU is cleared, so
		 * test !__PageMovable instead: an LRU page's mapping cannot
		 * have PAGE_MAPPING_MOVABLE set.
		 */
		if (!__PageMovable(page))
			inc_node_page_state(page, NR_ISOLATED_ANON +
						page_is_file_lru(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
					MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret) {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
				pfn, ret, page->flags, &page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
			pfn, ret, page_count(page), page->flags, &page->flags);
	}
	return ret;
}

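/*
 * Soft offline an in-use page. Transparent huge pages are split first,
 * and the pageblock is temporarily isolated so that, once migration
 * succeeds and the old page is freed, it goes straight to the buddy
 * allocator where it can be reliably marked hwpoisoned.
 */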
static int soft_offline_in_use_page(struct page *page, int flags)
{
	int ret;
	int mt;
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage)) {
		lock_page(page);
		if (!PageAnon(page) || unlikely(split_huge_page(page))) {
			unlock_page(page);
			if (!PageAnon(page))
				pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
			else
				pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
			put_hwpoison_page(page);
			return -EBUSY;
		}
		unlock_page(page);
	}

	/*
	 * Setting MIGRATE_ISOLATE here ensures that the page is linked to
	 * the free list immediately (not via the pcplist) when it is
	 * released after successful migration. Otherwise we cannot
	 * guarantee that the page is really free after put_page() returns,
	 * and set_hwpoison_free_buddy_page() would very likely fail.
	 */
	mt = get_pageblock_migratetype(page);
	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	if (PageHuge(page))
		ret = soft_offline_huge_page(page, flags);
	else
		ret = __soft_offline_page(page, flags);
	set_pageblock_migratetype(page, mt);
	return ret;
}

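/*
 * Soft offline a page that is already free: dissolve it if it is a free
 * hugepage, then take the resulting buddy page off the free lists and
 * mark it hwpoisoned.
 */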
static int soft_offline_free_page(struct page *page)
{
	int rc = dissolve_free_huge_page(page);

	if (!rc) {
		if (set_hwpoison_free_buddy_page(page))
			num_poisoned_pages_inc();
		else
			rc = -EBUSY;
	}
	return rc;
}

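/*
 * For reference, soft offlining is normally requested from user space,
 * e.g. via madvise(MADV_SOFT_OFFLINE) on a mapped address (requires
 * CAP_SYS_ADMIN); roughly:
 *
 *	if (madvise(addr, pagesize, MADV_SOFT_OFFLINE))
 *		perror("madvise(MADV_SOFT_OFFLINE)");
 *
 * There is also a sysfs interface under /sys/devices/system/memory/ for
 * offlining by physical address; see the hwpoison documentation.
 */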
/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	struct page *page;

	if (!pfn_valid(pfn))
		return -ENXIO;
	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page)
		return -EIO;

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		if (flags & MF_COUNT_INCREASED)
			put_hwpoison_page(page);
		return -EBUSY;
	}

	get_online_mems();
	ret = get_any_page(page, pfn, flags);
	put_online_mems();

	if (ret > 0)
		ret = soft_offline_in_use_page(page, flags);
	else if (ret == 0)
		ret = soft_offline_free_page(page);

	return ret;
}