mm/memory-failure.c (git blame, mirror_ubuntu-jammy-kernel.git)
1439f94c 1// SPDX-License-Identifier: GPL-2.0-only
6a46079c
AK
2/*
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
5 *
6a46079c 6 * High level machine check handler. Handles pages reported by the
1c80b990 7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
6a46079c 8 * failure.
1c80b990
AK
9 *
10 * In addition there is a "soft offline" entry point that allows stopping the
11 * use of not-yet-corrupted but suspicious pages without killing anything.
6a46079c
AK
12 *
13 * Handles page cache pages in various states. The tricky part
1c80b990
AK
14 * here is that we can access any page asynchronously with respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, as in get the standard locks, even if that means
19 * the error handling takes potentially a long time.
e0de78df
AK
20 *
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added iff:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/vm/page-types when running a real workload.
1c80b990
AK
28 *
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non-linear complexity in the number of processes. But since memory corruptions
33 * are rare we hope to get away with this. This avoids impacting the core
34 * VM.
6a46079c 35 */
6a46079c
AK
36#include <linux/kernel.h>
37#include <linux/mm.h>
38#include <linux/page-flags.h>
478c5ffc 39#include <linux/kernel-page-flags.h>
3f07c014 40#include <linux/sched/signal.h>
29930025 41#include <linux/sched/task.h>
01e00f88 42#include <linux/ksm.h>
6a46079c 43#include <linux/rmap.h>
b9e15baf 44#include <linux/export.h>
6a46079c
AK
45#include <linux/pagemap.h>
46#include <linux/swap.h>
47#include <linux/backing-dev.h>
facb6011 48#include <linux/migrate.h>
facb6011 49#include <linux/suspend.h>
5a0e3ad6 50#include <linux/slab.h>
bf998156 51#include <linux/swapops.h>
7af446a8 52#include <linux/hugetlb.h>
20d6c96b 53#include <linux/memory_hotplug.h>
5db8a73a 54#include <linux/mm_inline.h>
6100e34b 55#include <linux/memremap.h>
ea8f5fb8 56#include <linux/kfifo.h>
a5f65109 57#include <linux/ratelimit.h>
d4ae9916 58#include <linux/page-isolation.h>
a3f5d80e 59#include <linux/pagewalk.h>
6a46079c 60#include "internal.h"
97f0b134 61#include "ras/ras_event.h"
6a46079c
AK
62
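/*
 * Tunable via /proc/sys/vm/ (see Documentation/admin-guide/sysctl/vm.rst).
 * Roughly: memory_failure_early_kill selects whether processes mapping a
 * corrupted page are killed as soon as the error is detected (1) or only
 * unmapped and killed lazily when they actually touch the page (0);
 * memory_failure_recovery selects whether recovery is attempted at all (1)
 * or the machine panics on an uncorrected memory error (0).
 */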
63int sysctl_memory_failure_early_kill __read_mostly = 0;
64
65int sysctl_memory_failure_recovery __read_mostly = 1;
66
293c07e3 67atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
6a46079c 68
510d25c9
NH
69static bool __page_handle_poison(struct page *page)
70{
f87060d3 71 int ret;
510d25c9
NH
72
73 zone_pcp_disable(page_zone(page));
74 ret = dissolve_free_huge_page(page);
75 if (!ret)
76 ret = take_page_off_buddy(page);
77 zone_pcp_enable(page_zone(page));
78
f87060d3 79 return ret > 0;
510d25c9
NH
80}
81
6b9a217e 82static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
06be6ff3 83{
6b9a217e
OS
84 if (hugepage_or_freepage) {
85 /*
86 * Doing this check for free pages is also fine since dissolve_free_huge_page
87 * returns 0 for non-hugetlb pages as well.
88 */
510d25c9 89 if (!__page_handle_poison(page))
6b9a217e
OS
90 /*
91 * We could fail to take off the target page from buddy
f0953a1b 92 * for example due to racy page allocation, but that's
6b9a217e
OS
93 * acceptable because soft-offlined page is not broken
94 * and if someone really want to use it, they should
95 * take it.
96 */
97 return false;
98 }
99
06be6ff3 100 SetPageHWPoison(page);
79f5f8fa
OS
101 if (release)
102 put_page(page);
06be6ff3
OS
103 page_ref_inc(page);
104 num_poisoned_pages_inc();
6b9a217e
OS
105
106 return true;
06be6ff3
OS
107}
108
27df5068
AK
109#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
110
1bfe5feb 111u32 hwpoison_filter_enable = 0;
7c116f2b
WF
112u32 hwpoison_filter_dev_major = ~0U;
113u32 hwpoison_filter_dev_minor = ~0U;
478c5ffc
WF
114u64 hwpoison_filter_flags_mask;
115u64 hwpoison_filter_flags_value;
1bfe5feb 116EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
7c116f2b
WF
117EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
118EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
478c5ffc
WF
119EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
120EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
7c116f2b
WF
121
122static int hwpoison_filter_dev(struct page *p)
123{
124 struct address_space *mapping;
125 dev_t dev;
126
127 if (hwpoison_filter_dev_major == ~0U &&
128 hwpoison_filter_dev_minor == ~0U)
129 return 0;
130
131 /*
1c80b990 132 * page_mapping() does not accept slab pages.
7c116f2b
WF
133 */
134 if (PageSlab(p))
135 return -EINVAL;
136
137 mapping = page_mapping(p);
138 if (mapping == NULL || mapping->host == NULL)
139 return -EINVAL;
140
141 dev = mapping->host->i_sb->s_dev;
142 if (hwpoison_filter_dev_major != ~0U &&
143 hwpoison_filter_dev_major != MAJOR(dev))
144 return -EINVAL;
145 if (hwpoison_filter_dev_minor != ~0U &&
146 hwpoison_filter_dev_minor != MINOR(dev))
147 return -EINVAL;
148
149 return 0;
150}
151
478c5ffc
WF
152static int hwpoison_filter_flags(struct page *p)
153{
154 if (!hwpoison_filter_flags_mask)
155 return 0;
156
157 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
158 hwpoison_filter_flags_value)
159 return 0;
160 else
161 return -EINVAL;
162}
163
4fd466eb
AK
164/*
165 * This allows stress tests to limit test scope to a collection of tasks
166 * by putting them under some memcg. This prevents killing unrelated/important
167 * processes such as /sbin/init. Note that the target task may share clean
168 * pages with init (e.g. libc text), which is harmless. If the target task
169 * shares _dirty_ pages with another task B, the test scheme must make sure B
170 * is also included in the memcg. Lastly, due to race conditions this filter
171 * can only guarantee that the page either belongs to the memcg tasks, or is
172 * a freed page.
173 */
94a59fb3 174#ifdef CONFIG_MEMCG
4fd466eb
AK
175u64 hwpoison_filter_memcg;
176EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
177static int hwpoison_filter_task(struct page *p)
178{
4fd466eb
AK
179 if (!hwpoison_filter_memcg)
180 return 0;
181
94a59fb3 182 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
4fd466eb
AK
183 return -EINVAL;
184
185 return 0;
186}
187#else
188static int hwpoison_filter_task(struct page *p) { return 0; }
189#endif
190
7c116f2b
WF
191int hwpoison_filter(struct page *p)
192{
1bfe5feb
HL
193 if (!hwpoison_filter_enable)
194 return 0;
195
7c116f2b
WF
196 if (hwpoison_filter_dev(p))
197 return -EINVAL;
198
478c5ffc
WF
199 if (hwpoison_filter_flags(p))
200 return -EINVAL;
201
4fd466eb
AK
202 if (hwpoison_filter_task(p))
203 return -EINVAL;
204
7c116f2b
WF
205 return 0;
206}
27df5068
AK
207#else
208int hwpoison_filter(struct page *p)
209{
210 return 0;
211}
212#endif
213
7c116f2b
WF
214EXPORT_SYMBOL_GPL(hwpoison_filter);
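/*
 * For reference: in stress testing these filter knobs are normally driven
 * through the hwpoison-inject module (CONFIG_HWPOISON_INJECT), which wires
 * them up under debugfs next to its corrupt-pfn/unpoison-pfn injection
 * entries; see mm/hwpoison-inject.c and Documentation/vm/hwpoison.rst for
 * the exact file names.
 */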
215
ae1139ec
DW
216/*
217 * Kill all processes that have a poisoned page mapped and then isolate
218 * the page.
219 *
220 * General strategy:
221 * Find all processes having the page mapped and kill them.
222 * But we keep a page reference around so that the page is not
223 * actually freed yet.
224 * Then stash the page away
225 *
226 * There's no convenient way to get back to mapped processes
227 * from the VMAs. So do a brute-force search over all
228 * running processes.
229 *
230 * Remember that machine checks are not common (or rather
231 * if they are common you have other problems), so this shouldn't
232 * be a performance issue.
233 *
234 * Also there are some races possible while we get from the
235 * error detection to actually handle it.
236 */
237
238struct to_kill {
239 struct list_head nd;
240 struct task_struct *tsk;
241 unsigned long addr;
242 short size_shift;
ae1139ec
DW
243};
244
6a46079c 245/*
7329bbeb
TL
246 * Send all the processes who have the page mapped a signal.
247 * ``action optional'' if they are not immediately affected by the error
248 * ``action required'' if error happened in current execution context
6a46079c 249 */
ae1139ec 250static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
6a46079c 251{
ae1139ec
DW
252 struct task_struct *t = tk->tsk;
253 short addr_lsb = tk->size_shift;
872e9a20 254 int ret = 0;
6a46079c 255
03151c6e 256 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
872e9a20 257 pfn, t->comm, t->pid);
7329bbeb 258
872e9a20 259 if (flags & MF_ACTION_REQUIRED) {
30c9cf49
AY
260 if (t == current)
261 ret = force_sig_mceerr(BUS_MCEERR_AR,
872e9a20 262 (void __user *)tk->addr, addr_lsb);
30c9cf49
AY
263 else
264 /* Signal other processes sharing the page if they have PF_MCE_EARLY set. */
265 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
266 addr_lsb, t);
7329bbeb
TL
267 } else {
268 /*
269 * Don't use force here, it's convenient if the signal
270 * can be temporarily blocked.
271 * This could cause a loop when the user sets SIGBUS
272 * to SIG_IGN, but hopefully no one will do that?
273 */
ae1139ec 274 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
c0f45555 275 addr_lsb, t); /* synchronous? */
7329bbeb 276 }
6a46079c 277 if (ret < 0)
495367c0 278 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
1170532b 279 t->comm, t->pid, ret);
6a46079c
AK
280 return ret;
281}
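/*
 * For illustration only: a userspace SIGBUS handler installed with
 * SA_SIGINFO would observe roughly the following for the signals sent
 * above:
 *
 *   si_signo    == SIGBUS
 *   si_code     == BUS_MCEERR_AR (action required) or BUS_MCEERR_AO
 *   si_addr     == user virtual address of the poisoned page (tk->addr)
 *   si_addr_lsb == log2 of the affected mapping size (tk->size_shift)
 */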
282
588f9ce6 283/*
47e431f4 284 * Unknown page type encountered. Try to check whether it can turn PageLRU by
d0505e9f 285 * lru_add_drain_all.
588f9ce6 286 */
d0505e9f 287void shake_page(struct page *p)
588f9ce6 288{
8bcb74de
NH
289 if (PageHuge(p))
290 return;
291
588f9ce6
AK
292 if (!PageSlab(p)) {
293 lru_add_drain_all();
588f9ce6
AK
294 if (PageLRU(p) || is_free_buddy_page(p))
295 return;
296 }
facb6011 297
588f9ce6 298 /*
d0505e9f
YS
299 * TODO: Could shrink slab caches here if a lightweight range-based
300 * shrinker will be available.
588f9ce6
AK
301 */
302}
303EXPORT_SYMBOL_GPL(shake_page);
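/*
 * Why draining can help: pages still sitting in the per-CPU LRU-add caches
 * are not yet PageLRU, so the handler cannot classify or isolate them;
 * lru_add_drain_all() pushes them onto the LRU lists, which may turn an
 * "unknown" page into one of the states handled further below.
 */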
304
6100e34b
DW
305static unsigned long dev_pagemap_mapping_shift(struct page *page,
306 struct vm_area_struct *vma)
307{
308 unsigned long address = vma_address(page, vma);
5c91c0e7 309 unsigned long ret = 0;
6100e34b
DW
310 pgd_t *pgd;
311 p4d_t *p4d;
312 pud_t *pud;
313 pmd_t *pmd;
314 pte_t *pte;
315
316 pgd = pgd_offset(vma->vm_mm, address);
317 if (!pgd_present(*pgd))
318 return 0;
319 p4d = p4d_offset(pgd, address);
320 if (!p4d_present(*p4d))
321 return 0;
322 pud = pud_offset(p4d, address);
323 if (!pud_present(*pud))
324 return 0;
325 if (pud_devmap(*pud))
326 return PUD_SHIFT;
327 pmd = pmd_offset(pud, address);
328 if (!pmd_present(*pmd))
329 return 0;
330 if (pmd_devmap(*pmd))
331 return PMD_SHIFT;
332 pte = pte_offset_map(pmd, address);
5c91c0e7
QZ
333 if (pte_present(*pte) && pte_devmap(*pte))
334 ret = PAGE_SHIFT;
335 pte_unmap(pte);
336 return ret;
6100e34b 337}
6a46079c
AK
338
339/*
340 * Failure handling: if we can't find or can't kill a process there's
341 * not much we can do. We just print a message and ignore otherwise.
342 */
343
344/*
345 * Schedule a process for later kill.
346 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
6a46079c
AK
347 */
348static void add_to_kill(struct task_struct *tsk, struct page *p,
349 struct vm_area_struct *vma,
996ff7a0 350 struct list_head *to_kill)
6a46079c
AK
351{
352 struct to_kill *tk;
353
996ff7a0
JC
354 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
355 if (!tk) {
356 pr_err("Memory failure: Out of memory while machine check handling\n");
357 return;
6a46079c 358 }
996ff7a0 359
6a46079c 360 tk->addr = page_address_in_vma(p, vma);
6100e34b
DW
361 if (is_zone_device_page(p))
362 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
363 else
75068518 364 tk->size_shift = page_shift(compound_head(p));
6a46079c
AK
365
366 /*
3d7fed4a
JC
367 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
368 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
369 * "tk->size_shift == 0" effectively checks for no mapping on
370 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
371 * to a process' address space, it's possible not all N VMAs
372 * contain mappings for the page, but at least one VMA does.
373 * Only deliver SIGBUS with payload derived from the VMA that
374 * has a mapping for the page.
6a46079c 375 */
3d7fed4a 376 if (tk->addr == -EFAULT) {
495367c0 377 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
6a46079c 378 page_to_pfn(p), tsk->comm);
3d7fed4a
JC
379 } else if (tk->size_shift == 0) {
380 kfree(tk);
381 return;
6a46079c 382 }
996ff7a0 383
6a46079c
AK
384 get_task_struct(tsk);
385 tk->tsk = tsk;
386 list_add_tail(&tk->nd, to_kill);
387}
388
389/*
390 * Kill the processes that have been collected earlier.
391 *
a21c184f
ML
392 * Only do anything when FORCEKILL is set, otherwise just free the
393 * list (this is used for clean pages which do not need killing)
6a46079c
AK
394 * Also when FAIL is set do a force kill because something went
395 * wrong earlier.
396 */
ae1139ec
DW
397static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
398 unsigned long pfn, int flags)
6a46079c
AK
399{
400 struct to_kill *tk, *next;
401
402 list_for_each_entry_safe (tk, next, to_kill, nd) {
6751ed65 403 if (forcekill) {
6a46079c 404 /*
af901ca1 405 * In case something went wrong with munmapping
6a46079c
AK
406 * make sure the process doesn't catch the
407 * signal and then access the memory. Just kill it.
6a46079c 408 */
3d7fed4a 409 if (fail || tk->addr == -EFAULT) {
495367c0 410 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
1170532b 411 pfn, tk->tsk->comm, tk->tsk->pid);
6376360e
NH
412 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
413 tk->tsk, PIDTYPE_PID);
6a46079c
AK
414 }
415
416 /*
417 * In theory the process could have mapped
418 * something else on the address in-between. We could
419 * check for that, but we need to tell the
420 * process anyways.
421 */
ae1139ec 422 else if (kill_proc(tk, pfn, flags) < 0)
495367c0 423 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
1170532b 424 pfn, tk->tsk->comm, tk->tsk->pid);
6a46079c
AK
425 }
426 put_task_struct(tk->tsk);
427 kfree(tk);
428 }
429}
430
3ba08129
NH
431/*
432 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
433 * on behalf of the thread group. Return task_struct of the (first found)
434 * dedicated thread if found, and return NULL otherwise.
435 *
436 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
437 * have to call rcu_read_lock/unlock() in this function.
438 */
439static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
6a46079c 440{
3ba08129
NH
441 struct task_struct *t;
442
4e018b45
NH
443 for_each_thread(tsk, t) {
444 if (t->flags & PF_MCE_PROCESS) {
445 if (t->flags & PF_MCE_EARLY)
446 return t;
447 } else {
448 if (sysctl_memory_failure_early_kill)
449 return t;
450 }
451 }
3ba08129
NH
452 return NULL;
453}
454
455/*
456 * Determine whether a given process is "early kill" process which expects
457 * to be signaled when some page under the process is hwpoisoned.
458 * Return task_struct of the dedicated thread (main thread unless explicitly
30c9cf49 459 * specified) if the process is "early kill" and otherwise returns NULL.
03151c6e 460 *
30c9cf49
AY
461 * Note that the above is true for Action Optional case. For Action Required
462 * case, it's only meaningful to the current thread, which needs to be signaled
463 * with SIGBUS. The error is Action Optional for other, non-current
464 * processes sharing the same error page; if such a process is "early kill", the
465 * task_struct of its dedicated thread will also be returned.
3ba08129
NH
466 */
467static struct task_struct *task_early_kill(struct task_struct *tsk,
468 int force_early)
469{
6a46079c 470 if (!tsk->mm)
3ba08129 471 return NULL;
30c9cf49
AY
472 /*
473 * Comparing ->mm here because current task might represent
474 * a subthread, while tsk always points to the main thread.
475 */
476 if (force_early && tsk->mm == current->mm)
477 return current;
478
4e018b45 479 return find_early_kill_thread(tsk);
6a46079c
AK
480}
481
482/*
483 * Collect processes when the error hit an anonymous page.
484 */
485static void collect_procs_anon(struct page *page, struct list_head *to_kill,
996ff7a0 486 int force_early)
6a46079c
AK
487{
488 struct vm_area_struct *vma;
489 struct task_struct *tsk;
490 struct anon_vma *av;
bf181b9f 491 pgoff_t pgoff;
6a46079c 492
4fc3f1d6 493 av = page_lock_anon_vma_read(page);
6a46079c 494 if (av == NULL) /* Not actually mapped anymore */
9b679320
PZ
495 return;
496
a0f7a756 497 pgoff = page_to_pgoff(page);
9b679320 498 read_lock(&tasklist_lock);
6a46079c 499 for_each_process (tsk) {
5beb4930 500 struct anon_vma_chain *vmac;
3ba08129 501 struct task_struct *t = task_early_kill(tsk, force_early);
5beb4930 502
3ba08129 503 if (!t)
6a46079c 504 continue;
bf181b9f
ML
505 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
506 pgoff, pgoff) {
5beb4930 507 vma = vmac->vma;
6a46079c
AK
508 if (!page_mapped_in_vma(page, vma))
509 continue;
3ba08129 510 if (vma->vm_mm == t->mm)
996ff7a0 511 add_to_kill(t, page, vma, to_kill);
6a46079c
AK
512 }
513 }
6a46079c 514 read_unlock(&tasklist_lock);
4fc3f1d6 515 page_unlock_anon_vma_read(av);
6a46079c
AK
516}
517
518/*
519 * Collect processes when the error hit a file mapped page.
520 */
521static void collect_procs_file(struct page *page, struct list_head *to_kill,
996ff7a0 522 int force_early)
6a46079c
AK
523{
524 struct vm_area_struct *vma;
525 struct task_struct *tsk;
6a46079c 526 struct address_space *mapping = page->mapping;
c43bc03d 527 pgoff_t pgoff;
6a46079c 528
d28eb9c8 529 i_mmap_lock_read(mapping);
9b679320 530 read_lock(&tasklist_lock);
c43bc03d 531 pgoff = page_to_pgoff(page);
6a46079c 532 for_each_process(tsk) {
3ba08129 533 struct task_struct *t = task_early_kill(tsk, force_early);
6a46079c 534
3ba08129 535 if (!t)
6a46079c 536 continue;
6b2dbba8 537 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
6a46079c
AK
538 pgoff) {
539 /*
540 * Send early kill signal to tasks where a vma covers
541 * the page but the corrupted page is not necessarily
542 * mapped in its pte.
543 * Assume applications that requested early kill want
544 * to be informed of all such data corruptions.
545 */
3ba08129 546 if (vma->vm_mm == t->mm)
996ff7a0 547 add_to_kill(t, page, vma, to_kill);
6a46079c
AK
548 }
549 }
6a46079c 550 read_unlock(&tasklist_lock);
d28eb9c8 551 i_mmap_unlock_read(mapping);
6a46079c
AK
552}
553
554/*
555 * Collect the processes who have the corrupted page mapped to kill.
6a46079c 556 */
74614de1
TL
557static void collect_procs(struct page *page, struct list_head *tokill,
558 int force_early)
6a46079c 559{
6a46079c
AK
560 if (!page->mapping)
561 return;
562
6a46079c 563 if (PageAnon(page))
996ff7a0 564 collect_procs_anon(page, tokill, force_early);
6a46079c 565 else
996ff7a0 566 collect_procs_file(page, tokill, force_early);
6a46079c
AK
567}
568
a3f5d80e
NH
569struct hwp_walk {
570 struct to_kill tk;
571 unsigned long pfn;
572 int flags;
573};
574
575static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
576{
577 tk->addr = addr;
578 tk->size_shift = shift;
579}
580
581static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
582 unsigned long poisoned_pfn, struct to_kill *tk)
583{
584 unsigned long pfn = 0;
585
586 if (pte_present(pte)) {
587 pfn = pte_pfn(pte);
588 } else {
589 swp_entry_t swp = pte_to_swp_entry(pte);
590
591 if (is_hwpoison_entry(swp))
592 pfn = hwpoison_entry_to_pfn(swp);
593 }
594
595 if (!pfn || pfn != poisoned_pfn)
596 return 0;
597
598 set_to_kill(tk, addr, shift);
599 return 1;
600}
601
602#ifdef CONFIG_TRANSPARENT_HUGEPAGE
603static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
604 struct hwp_walk *hwp)
605{
606 pmd_t pmd = *pmdp;
607 unsigned long pfn;
608 unsigned long hwpoison_vaddr;
609
610 if (!pmd_present(pmd))
611 return 0;
612 pfn = pmd_pfn(pmd);
613 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
614 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
615 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
616 return 1;
617 }
618 return 0;
619}
620#else
621static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
622 struct hwp_walk *hwp)
623{
624 return 0;
625}
626#endif
627
628static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
629 unsigned long end, struct mm_walk *walk)
630{
631 struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
632 int ret = 0;
ea3732f7 633 pte_t *ptep, *mapped_pte;
a3f5d80e
NH
634 spinlock_t *ptl;
635
636 ptl = pmd_trans_huge_lock(pmdp, walk->vma);
637 if (ptl) {
638 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
639 spin_unlock(ptl);
640 goto out;
641 }
642
643 if (pmd_trans_unstable(pmdp))
644 goto out;
645
ea3732f7
ML
646 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
647 addr, &ptl);
a3f5d80e
NH
648 for (; addr != end; ptep++, addr += PAGE_SIZE) {
649 ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
650 hwp->pfn, &hwp->tk);
651 if (ret == 1)
652 break;
653 }
ea3732f7 654 pte_unmap_unlock(mapped_pte, ptl);
a3f5d80e
NH
655out:
656 cond_resched();
657 return ret;
658}
659
660#ifdef CONFIG_HUGETLB_PAGE
661static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
662 unsigned long addr, unsigned long end,
663 struct mm_walk *walk)
664{
665 struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
666 pte_t pte = huge_ptep_get(ptep);
667 struct hstate *h = hstate_vma(walk->vma);
668
669 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
670 hwp->pfn, &hwp->tk);
671}
672#else
673#define hwpoison_hugetlb_range NULL
674#endif
675
676static struct mm_walk_ops hwp_walk_ops = {
677 .pmd_entry = hwpoison_pte_range,
678 .hugetlb_entry = hwpoison_hugetlb_range,
679};
680
681/*
682 * Sends SIGBUS to the current process with error info.
683 *
684 * This function is intended to handle "Action Required" MCEs on already
685 * hardware poisoned pages. They could happen, for example, when
686 * memory_failure() failed to unmap the error page at the first call, or
687 * when multiple local machine checks happened on different CPUs.
688 *
689 * MCE handler currently has no easy access to the error virtual address,
690 * so this function walks the page tables to find it. The returned virtual address
691 * is correct in most cases, but it could be wrong when the application
692 * process has multiple entries mapping the error page.
693 */
694static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
695 int flags)
696{
697 int ret;
698 struct hwp_walk priv = {
699 .pfn = pfn,
700 };
701 priv.tk.tsk = p;
702
703 mmap_read_lock(p->mm);
704 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
705 (void *)&priv);
706 if (ret == 1 && priv.tk.addr)
707 kill_proc(&priv.tk, pfn, flags);
708 mmap_read_unlock(p->mm);
709 return ret ? -EFAULT : -EHWPOISON;
710}
711
6a46079c 712static const char *action_name[] = {
cc637b17
XX
713 [MF_IGNORED] = "Ignored",
714 [MF_FAILED] = "Failed",
715 [MF_DELAYED] = "Delayed",
716 [MF_RECOVERED] = "Recovered",
64d37a2b
NH
717};
718
719static const char * const action_page_types[] = {
cc637b17
XX
720 [MF_MSG_KERNEL] = "reserved kernel page",
721 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
722 [MF_MSG_SLAB] = "kernel slab page",
723 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
724 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
725 [MF_MSG_HUGE] = "huge page",
726 [MF_MSG_FREE_HUGE] = "free huge page",
31286a84 727 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
cc637b17
XX
728 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
729 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
730 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
731 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
732 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
733 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
734 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
735 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
736 [MF_MSG_CLEAN_LRU] = "clean LRU page",
737 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
738 [MF_MSG_BUDDY] = "free buddy page",
739 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
6100e34b 740 [MF_MSG_DAX] = "dax page",
5d1fd5dc 741 [MF_MSG_UNSPLIT_THP] = "unsplit thp",
cc637b17 742 [MF_MSG_UNKNOWN] = "unknown page",
64d37a2b
NH
743};
744
dc2a1cbf
WF
745/*
746 * XXX: It is possible that a page is isolated from LRU cache,
747 * and then kept in the swap cache, or fails to be removed from the page cache.
748 * The page count will stop it from being freed by unpoison.
749 * Stress tests should be aware of this memory leak problem.
750 */
751static int delete_from_lru_cache(struct page *p)
752{
753 if (!isolate_lru_page(p)) {
754 /*
755 * Clear sensible page flags, so that the buddy system won't
756 * complain when the page is unpoison-and-freed.
757 */
758 ClearPageActive(p);
759 ClearPageUnevictable(p);
18365225
MH
760
761 /*
762 * Poisoned page might never drop its ref count to 0 so we have
763 * to uncharge it manually from its memcg.
764 */
765 mem_cgroup_uncharge(p);
766
dc2a1cbf
WF
767 /*
768 * drop the page count elevated by isolate_lru_page()
769 */
09cbfeaf 770 put_page(p);
dc2a1cbf
WF
771 return 0;
772 }
773 return -EIO;
774}
775
78bb9203
NH
776static int truncate_error_page(struct page *p, unsigned long pfn,
777 struct address_space *mapping)
778{
779 int ret = MF_FAILED;
780
781 if (mapping->a_ops->error_remove_page) {
782 int err = mapping->a_ops->error_remove_page(mapping, p);
783
784 if (err != 0) {
785 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
786 pfn, err);
787 } else if (page_has_private(p) &&
788 !try_to_release_page(p, GFP_NOIO)) {
789 pr_info("Memory failure: %#lx: failed to release buffers\n",
790 pfn);
791 } else {
792 ret = MF_RECOVERED;
793 }
794 } else {
795 /*
796 * If the file system doesn't support it just invalidate
797 * This fails on dirty or anything with private pages
798 */
799 if (invalidate_inode_page(p))
800 ret = MF_RECOVERED;
801 else
802 pr_info("Memory failure: %#lx: Failed to invalidate\n",
803 pfn);
804 }
805
806 return ret;
807}
808
6a46079c
AK
809/*
810 * Error hit kernel page.
811 * Do nothing, try to be lucky and not touch this instead. For a few cases we
812 * could be more sophisticated.
813 */
814static int me_kernel(struct page *p, unsigned long pfn)
6a46079c 815{
ea6d0630 816 unlock_page(p);
cc637b17 817 return MF_IGNORED;
6a46079c
AK
818}
819
820/*
821 * Page in unknown state. Do nothing.
822 */
823static int me_unknown(struct page *p, unsigned long pfn)
824{
495367c0 825 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
ea6d0630 826 unlock_page(p);
cc637b17 827 return MF_FAILED;
6a46079c
AK
828}
829
6a46079c
AK
830/*
831 * Clean (or cleaned) page cache page.
832 */
833static int me_pagecache_clean(struct page *p, unsigned long pfn)
834{
ea6d0630 835 int ret;
6a46079c
AK
836 struct address_space *mapping;
837
dc2a1cbf
WF
838 delete_from_lru_cache(p);
839
6a46079c
AK
840 /*
841 * For anonymous pages we're done; the only reference left
842 * should be the one m_f() holds.
843 */
ea6d0630
NH
844 if (PageAnon(p)) {
845 ret = MF_RECOVERED;
846 goto out;
847 }
6a46079c
AK
848
849 /*
850 * Now truncate the page in the page cache. This is really
851 * more like a "temporary hole punch"
852 * Don't do this for block devices when someone else
853 * has a reference, because it could be file system metadata
854 * and that's not safe to truncate.
855 */
856 mapping = page_mapping(p);
857 if (!mapping) {
858 /*
859 * Page has been torn down in the meantime
860 */
ea6d0630
NH
861 ret = MF_FAILED;
862 goto out;
6a46079c
AK
863 }
864
865 /*
866 * Truncation is a bit tricky. Enable it per file system for now.
867 *
9608703e 868 * Open: to take i_rwsem or not for this? Right now we don't.
6a46079c 869 */
ea6d0630
NH
870 ret = truncate_error_page(p, pfn, mapping);
871out:
872 unlock_page(p);
873 return ret;
6a46079c
AK
874}
875
876/*
549543df 877 * Dirty pagecache page
6a46079c
AK
878 * Issues: when the error hits a hole page, the error is not properly
879 * propagated.
880 */
881static int me_pagecache_dirty(struct page *p, unsigned long pfn)
882{
883 struct address_space *mapping = page_mapping(p);
884
885 SetPageError(p);
886 /* TBD: print more information about the file. */
887 if (mapping) {
888 /*
889 * IO error will be reported by write(), fsync(), etc.
890 * who check the mapping.
891 * This way the application knows that something went
892 * wrong with its dirty file data.
893 *
894 * There's one open issue:
895 *
896 * The EIO will be only reported on the next IO
897 * operation and then cleared through the IO map.
898 * Normally Linux has two mechanisms to pass IO errors:
899 * first through the AS_EIO flag in the address space
900 * and then through the PageError flag in the page.
901 * Since we drop pages on memory failure handling the
902 * only mechanism open to use is through AS_EIO.
903 *
904 * This has the disadvantage that it gets cleared on
905 * the first operation that returns an error, while
906 * the PageError bit is more sticky and only cleared
907 * when the page is reread or dropped. If an
908 * application assumes it will always get an error on
909 * fsync, but does other operations on the fd before,
25985edc 910 * and the page is dropped in between, then the error
6a46079c
AK
911 * will not be properly reported.
912 *
913 * This can already happen even without hwpoisoned
914 * pages: first on metadata IO errors (which only
915 * report through AS_EIO) or when the page is dropped
916 * at the wrong time.
917 *
918 * So right now we assume that the application DTRT on
919 * the first EIO, but we're not worse than other parts
920 * of the kernel.
921 */
af21bfaf 922 mapping_set_error(mapping, -EIO);
6a46079c
AK
923 }
924
925 return me_pagecache_clean(p, pfn);
926}
927
928/*
929 * Clean and dirty swap cache.
930 *
931 * Dirty swap cache page is tricky to handle. The page could live both in page
932 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
933 * referenced concurrently by 2 types of PTEs:
934 * normal PTEs and swap PTEs. We try to handle them consistently by calling
935 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
936 * and then
937 * - clear dirty bit to prevent IO
938 * - remove from LRU
939 * - but keep in the swap cache, so that when we return to it on
940 * a later page fault, we know the application is accessing
941 * corrupted data and shall be killed (we installed simple
942 * interception code in do_swap_page to catch it).
943 *
944 * Clean swap cache pages can be directly isolated. A later page fault will
945 * bring in the known good data from disk.
946 */
947static int me_swapcache_dirty(struct page *p, unsigned long pfn)
948{
ea6d0630
NH
949 int ret;
950
6a46079c
AK
951 ClearPageDirty(p);
952 /* Trigger EIO in shmem: */
953 ClearPageUptodate(p);
954
ea6d0630
NH
955 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
956 unlock_page(p);
957 return ret;
6a46079c
AK
958}
959
960static int me_swapcache_clean(struct page *p, unsigned long pfn)
961{
ea6d0630
NH
962 int ret;
963
6a46079c 964 delete_from_swap_cache(p);
e43c3afb 965
ea6d0630
NH
966 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
967 unlock_page(p);
968 return ret;
6a46079c
AK
969}
970
971/*
972 * Huge pages. Needs work.
973 * Issues:
93f70f90
NH
974 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
975 * To narrow down kill region to one page, we need to break up pmd.
6a46079c
AK
976 */
977static int me_huge_page(struct page *p, unsigned long pfn)
978{
a8b2c2ce 979 int res;
93f70f90 980 struct page *hpage = compound_head(p);
78bb9203 981 struct address_space *mapping;
2491ffee
NH
982
983 if (!PageHuge(hpage))
984 return MF_DELAYED;
985
78bb9203
NH
986 mapping = page_mapping(hpage);
987 if (mapping) {
988 res = truncate_error_page(hpage, pfn, mapping);
ea6d0630 989 unlock_page(hpage);
78bb9203 990 } else {
a8b2c2ce 991 res = MF_FAILED;
78bb9203
NH
992 unlock_page(hpage);
993 /*
994 * migration entry prevents later access on error anonymous
995 * hugepage, so we can free and dissolve it into buddy to
996 * save healthy subpages.
997 */
998 if (PageAnon(hpage))
999 put_page(hpage);
510d25c9 1000 if (__page_handle_poison(p)) {
a8b2c2ce
OS
1001 page_ref_inc(p);
1002 res = MF_RECOVERED;
1003 }
93f70f90 1004 }
78bb9203
NH
1005
1006 return res;
6a46079c
AK
1007}
1008
1009/*
1010 * Various page states we can handle.
1011 *
1012 * A page state is defined by its current page->flags bits.
1013 * The table matches them in order and calls the right handler.
1014 *
1015 * This is quite tricky because we can access the page at any time
25985edc 1016 * in its life cycle, so all accesses have to be extremely careful.
6a46079c
AK
1017 *
1018 * This is not complete. More states could be added.
1019 * For any missing state don't attempt recovery.
1020 */
1021
1022#define dirty (1UL << PG_dirty)
6326fec1 1023#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
6a46079c
AK
1024#define unevict (1UL << PG_unevictable)
1025#define mlock (1UL << PG_mlocked)
6a46079c 1026#define lru (1UL << PG_lru)
6a46079c 1027#define head (1UL << PG_head)
6a46079c 1028#define slab (1UL << PG_slab)
6a46079c
AK
1029#define reserved (1UL << PG_reserved)
1030
1031static struct page_state {
1032 unsigned long mask;
1033 unsigned long res;
cc637b17 1034 enum mf_action_page_type type;
ea6d0630
NH
1035
1036 /* Callback ->action() has to unlock the relevant page inside it. */
6a46079c
AK
1037 int (*action)(struct page *p, unsigned long pfn);
1038} error_states[] = {
cc637b17 1039 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
95d01fc6
WF
1040 /*
1041 * free pages are specially detected outside this table:
1042 * PG_buddy pages only make a small fraction of all free pages.
1043 */
6a46079c
AK
1044
1045 /*
1046 * Could in theory check if slab page is free or if we can drop
1047 * currently unused objects without touching them. But just
1048 * treat it as standard kernel for now.
1049 */
cc637b17 1050 { slab, slab, MF_MSG_SLAB, me_kernel },
6a46079c 1051
cc637b17 1052 { head, head, MF_MSG_HUGE, me_huge_page },
6a46079c 1053
cc637b17
XX
1054 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
1055 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
6a46079c 1056
cc637b17
XX
1057 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
1058 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
6a46079c 1059
cc637b17
XX
1060 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
1061 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
5f4b9fc5 1062
cc637b17
XX
1063 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
1064 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
6a46079c
AK
1065
1066 /*
1067 * Catchall entry: must be at end.
1068 */
cc637b17 1069 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
6a46079c
AK
1070};
1071
2326c467
AK
1072#undef dirty
1073#undef sc
1074#undef unevict
1075#undef mlock
2326c467 1076#undef lru
2326c467 1077#undef head
2326c467
AK
1078#undef slab
1079#undef reserved
1080
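/*
 * Example of how the table above is used: a plain dirty LRU page has
 * PG_lru and PG_dirty set, so scanning error_states[] in order it first
 * matches the { lru|dirty, lru|dirty } entry and is handled by
 * me_pagecache_dirty(); anything that matches nothing falls through to
 * the MF_MSG_UNKNOWN catchall at the end.
 */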
ff604cf6
NH
1081/*
1082 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1083 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1084 */
cc3e2af4
XX
1085static void action_result(unsigned long pfn, enum mf_action_page_type type,
1086 enum mf_result result)
6a46079c 1087{
97f0b134
XX
1088 trace_memory_failure_event(pfn, type, result);
1089
495367c0 1090 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
64d37a2b 1091 pfn, action_page_types[type], action_name[result]);
6a46079c
AK
1092}
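/*
 * Example of the line this emits (format per the pr_err() above):
 *   "Memory failure: 0x3c8900: recovery action for dirty LRU page: Recovered"
 */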
1093
1094static int page_action(struct page_state *ps, struct page *p,
bd1ce5f9 1095 unsigned long pfn)
6a46079c
AK
1096{
1097 int result;
7456b040 1098 int count;
6a46079c 1099
ea6d0630 1100 /* page p should be unlocked after returning from ps->action(). */
6a46079c 1101 result = ps->action(p, pfn);
7456b040 1102
bd1ce5f9 1103 count = page_count(p) - 1;
cc637b17 1104 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
138ce286 1105 count--;
78bb9203 1106 if (count > 0) {
495367c0 1107 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
64d37a2b 1108 pfn, action_page_types[ps->type], count);
cc637b17 1109 result = MF_FAILED;
138ce286 1110 }
64d37a2b 1111 action_result(pfn, ps->type, result);
6a46079c
AK
1112
1113 /* Could do more checks here if page looks ok */
1114 /*
1115 * Could adjust zone counters here to correct for the missing page.
1116 */
1117
cc637b17 1118 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
6a46079c
AK
1119}
1120
25182f05
NH
1121/*
1122 * Return true if a page type of a given page is supported by hwpoison
1123 * mechanism (while handling could fail), otherwise false. This function
1124 * does not return true for hugetlb or device memory pages, so it's assumed
1125 * to be called only in the context where we never have such pages.
1126 */
1127static inline bool HWPoisonHandlable(struct page *page)
1128{
acfa299a 1129 return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
25182f05
NH
1130}
1131
17e395b6 1132static int __get_hwpoison_page(struct page *page)
ead07f6a
NH
1133{
1134 struct page *head = compound_head(page);
25182f05
NH
1135 int ret = 0;
1136 bool hugetlb = false;
1137
1138 ret = get_hwpoison_huge_page(head, &hugetlb);
1139 if (hugetlb)
1140 return ret;
1141
1142 /*
1143 * This check prevents calling get_page_unless_zero()
1144 * for any unsupported type of page in order to reduce the risk of
1145 * unexpected races caused by taking a page refcount.
1146 */
1147 if (!HWPoisonHandlable(head))
fcc00621 1148 return -EBUSY;
ead07f6a 1149
c2e7e00b
KK
1150 if (get_page_unless_zero(head)) {
1151 if (head == compound_head(page))
1152 return 1;
1153
495367c0
CY
1154 pr_info("Memory failure: %#lx cannot catch tail\n",
1155 page_to_pfn(page));
c2e7e00b
KK
1156 put_page(head);
1157 }
1158
1159 return 0;
ead07f6a 1160}
ead07f6a 1161
2f714160 1162static int get_any_page(struct page *p, unsigned long flags)
17e395b6 1163{
2f714160
OS
1164 int ret = 0, pass = 0;
1165 bool count_increased = false;
17e395b6 1166
2f714160
OS
1167 if (flags & MF_COUNT_INCREASED)
1168 count_increased = true;
1169
1170try_again:
0ed950d1
NH
1171 if (!count_increased) {
1172 ret = __get_hwpoison_page(p);
1173 if (!ret) {
1174 if (page_count(p)) {
1175 /* We raced with an allocation, retry. */
1176 if (pass++ < 3)
1177 goto try_again;
1178 ret = -EBUSY;
1179 } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1180 /* We raced with put_page, retry. */
1181 if (pass++ < 3)
1182 goto try_again;
1183 ret = -EIO;
1184 }
1185 goto out;
1186 } else if (ret == -EBUSY) {
fcc00621
NH
1187 /*
1188 * We raced with (possibly temporary) unhandlable
1189 * page, retry.
1190 */
1191 if (pass++ < 3) {
d0505e9f 1192 shake_page(p);
2f714160 1193 goto try_again;
fcc00621
NH
1194 }
1195 ret = -EIO;
0ed950d1 1196 goto out;
2f714160 1197 }
0ed950d1
NH
1198 }
1199
1200 if (PageHuge(p) || HWPoisonHandlable(p)) {
1201 ret = 1;
2f714160 1202 } else {
0ed950d1
NH
1203 /*
1204 * A page we cannot handle. Check whether we can turn
1205 * it into something we can handle.
1206 */
1207 if (pass++ < 3) {
2f714160 1208 put_page(p);
d0505e9f 1209 shake_page(p);
0ed950d1
NH
1210 count_increased = false;
1211 goto try_again;
2f714160 1212 }
0ed950d1
NH
1213 put_page(p);
1214 ret = -EIO;
17e395b6 1215 }
0ed950d1 1216out:
941ca063
YS
1217 if (ret == -EIO)
1218 dump_page(p, "hwpoison: unhandlable page");
1219
17e395b6
OS
1220 return ret;
1221}
1222
0ed950d1
NH
1223/**
1224 * get_hwpoison_page() - Get refcount for memory error handling
1225 * @p: Raw error page (hit by memory error)
1226 * @flags: Flags controlling behavior of error handling
1227 *
1228 * get_hwpoison_page() takes a page refcount of an error page to handle memory
1229 * error on it, after checking that the error page is in a well-defined state
1230 * (defined as a page type on which we can successfully handle the memory
1231 * error, such as an LRU page or a hugetlb page).
1232 *
1233 * Memory error handling could be triggered at any time on any type of page,
1234 * so it's prone to race with typical memory management lifecycle (like
1235 * allocation and free). So to avoid such races, get_hwpoison_page() takes
1236 * extra care for the error page's state (as done in __get_hwpoison_page()),
1237 * and has some retry logic in get_any_page().
1238 *
1239 * Return: 0 on failure,
1240 * 1 on success for in-use pages in a well-defined state,
1241 * -EIO for pages on which we can not handle memory errors,
1242 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1243 * operations like allocation and free.
1244 */
1245static int get_hwpoison_page(struct page *p, unsigned long flags)
2f714160
OS
1246{
1247 int ret;
1248
1249 zone_pcp_disable(page_zone(p));
0ed950d1 1250 ret = get_any_page(p, flags);
2f714160
OS
1251 zone_pcp_enable(page_zone(p));
1252
1253 return ret;
1254}
1255
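/*
 * Sketch of the typical caller pattern (cf. memory_failure() below), not a
 * literal excerpt:
 *
 *   res = get_hwpoison_page(p, flags);
 *   if (res == 1)
 *           // refcount taken, page is in a handlable state: proceed
 *   else if (res == 0)
 *           // free page, or we lost a race with allocation/free
 *   else
 *           // -EIO/-EBUSY: page type we cannot handle, give up
 */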
6a46079c
AK
1256/*
1257 * Do all that is necessary to remove user space mappings. Unmap
1258 * the pages and send SIGBUS to the processes if the data was dirty.
1259 */
666e5a40 1260static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
ed8c2f49 1261 int flags, struct page *hpage)
6a46079c 1262{
36af6737 1263 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
6a46079c
AK
1264 struct address_space *mapping;
1265 LIST_HEAD(tokill);
1fb08ac6 1266 bool unmap_success;
6751ed65 1267 int kill = 1, forcekill;
286c469a 1268 bool mlocked = PageMlocked(hpage);
6a46079c 1269
93a9eb39
NH
1270 /*
1271 * Here we are interested only in user-mapped pages, so skip any
1272 * other types of pages.
1273 */
1274 if (PageReserved(p) || PageSlab(p))
666e5a40 1275 return true;
93a9eb39 1276 if (!(PageLRU(hpage) || PageHuge(p)))
666e5a40 1277 return true;
6a46079c 1278
6a46079c
AK
1279 /*
1280 * This check implies we don't kill processes if their pages
1281 * are in the swap cache early. Those are always late kills.
1282 */
7af446a8 1283 if (!page_mapped(hpage))
666e5a40 1284 return true;
1668bfd5 1285
52089b14 1286 if (PageKsm(p)) {
495367c0 1287 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
666e5a40 1288 return false;
52089b14 1289 }
6a46079c
AK
1290
1291 if (PageSwapCache(p)) {
495367c0
CY
1292 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
1293 pfn);
6a46079c
AK
1294 ttu |= TTU_IGNORE_HWPOISON;
1295 }
1296
1297 /*
1298 * Propagate the dirty bit from PTEs to struct page first, because we
1299 * need this to decide if we should kill or just drop the page.
db0480b3
WF
1300 * XXX: the dirty test could be racy: set_page_dirty() may not always
1301 * be called inside page lock (it's recommended but not enforced).
6a46079c 1302 */
7af446a8 1303 mapping = page_mapping(hpage);
6751ed65 1304 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
f56753ac 1305 mapping_can_writeback(mapping)) {
7af446a8
NH
1306 if (page_mkclean(hpage)) {
1307 SetPageDirty(hpage);
6a46079c
AK
1308 } else {
1309 kill = 0;
1310 ttu |= TTU_IGNORE_HWPOISON;
495367c0 1311 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
6a46079c
AK
1312 pfn);
1313 }
1314 }
1315
1316 /*
1317 * First collect all the processes that have the page
1318 * mapped in dirty form. This has to be done before try_to_unmap,
1319 * because ttu takes the rmap data structures down.
1320 *
1321 * Error handling: We ignore errors here because
1322 * there's nothing that can be done.
1323 */
1324 if (kill)
415c64c1 1325 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
6a46079c 1326
c0d0381a 1327 if (!PageHuge(hpage)) {
1fb08ac6 1328 try_to_unmap(hpage, ttu);
c0d0381a 1329 } else {
336bf30e
MK
1330 if (!PageAnon(hpage)) {
1331 /*
1332 * For hugetlb pages in shared mappings, try_to_unmap
1333 * could potentially call huge_pmd_unshare. Because of
1334 * this, take semaphore in write mode here and set
1335 * TTU_RMAP_LOCKED to indicate we have taken the lock
041711ce 1336 * at this higher level.
336bf30e
MK
1337 */
1338 mapping = hugetlb_page_mapping_lock_write(hpage);
1339 if (mapping) {
1fb08ac6 1340 try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
336bf30e 1341 i_mmap_unlock_write(mapping);
1fb08ac6 1342 } else
336bf30e 1343 pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
c0d0381a 1344 } else {
1fb08ac6 1345 try_to_unmap(hpage, ttu);
c0d0381a
MK
1346 }
1347 }
1fb08ac6
YS
1348
1349 unmap_success = !page_mapped(hpage);
666e5a40 1350 if (!unmap_success)
495367c0 1351 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1170532b 1352 pfn, page_mapcount(hpage));
a6d30ddd 1353
286c469a
NH
1354 /*
1355 * try_to_unmap() might put mlocked page in lru cache, so call
1356 * shake_page() again to ensure that it's flushed.
1357 */
1358 if (mlocked)
d0505e9f 1359 shake_page(hpage);
286c469a 1360
6a46079c
AK
1361 /*
1362 * Now that the dirty bit has been propagated to the
1363 * struct page and all unmaps done we can decide if
1364 * killing is needed or not. Only kill when the page
6751ed65
TL
1365 * was dirty or the process is not restartable,
1366 * otherwise the tokill list is merely
6a46079c
AK
1367 * freed. When there was a problem unmapping earlier
1368 * use a more force-full uncatchable kill to prevent
1369 * any accesses to the poisoned memory.
1370 */
415c64c1 1371 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
ae1139ec 1372 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1668bfd5 1373
666e5a40 1374 return unmap_success;
6a46079c
AK
1375}
1376
0348d2eb
NH
1377static int identify_page_state(unsigned long pfn, struct page *p,
1378 unsigned long page_flags)
761ad8d7
NH
1379{
1380 struct page_state *ps;
0348d2eb
NH
1381
1382 /*
1383 * The first check uses the current page flags which may not have any
1384 * relevant information. The second check with the saved page flags is
1385 * carried out only if the first check can't determine the page status.
1386 */
1387 for (ps = error_states;; ps++)
1388 if ((p->flags & ps->mask) == ps->res)
1389 break;
1390
1391 page_flags |= (p->flags & (1UL << PG_dirty));
1392
1393 if (!ps->mask)
1394 for (ps = error_states;; ps++)
1395 if ((page_flags & ps->mask) == ps->res)
1396 break;
1397 return page_action(ps, p, pfn);
1398}
1399
694bf0b0
OS
1400static int try_to_split_thp_page(struct page *page, const char *msg)
1401{
1402 lock_page(page);
1403 if (!PageAnon(page) || unlikely(split_huge_page(page))) {
1404 unsigned long pfn = page_to_pfn(page);
1405
1406 unlock_page(page);
1407 if (!PageAnon(page))
1408 pr_info("%s: %#lx: non anonymous thp\n", msg, pfn);
1409 else
1410 pr_info("%s: %#lx: thp split failed\n", msg, pfn);
1411 put_page(page);
1412 return -EBUSY;
1413 }
1414 unlock_page(page);
1415
1416 return 0;
1417}
1418
83b57531 1419static int memory_failure_hugetlb(unsigned long pfn, int flags)
0348d2eb 1420{
761ad8d7
NH
1421 struct page *p = pfn_to_page(pfn);
1422 struct page *head = compound_head(p);
1423 int res;
1424 unsigned long page_flags;
1425
1426 if (TestSetPageHWPoison(head)) {
1427 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1428 pfn);
a3f5d80e
NH
1429 res = -EHWPOISON;
1430 if (flags & MF_ACTION_REQUIRED)
1431 res = kill_accessing_process(current, page_to_pfn(head), flags);
1432 return res;
761ad8d7
NH
1433 }
1434
1435 num_poisoned_pages_inc();
1436
0ed950d1
NH
1437 if (!(flags & MF_COUNT_INCREASED)) {
1438 res = get_hwpoison_page(p, flags);
1439 if (!res) {
0ed950d1 1440 lock_page(head);
9617b9bb
NH
1441 if (hwpoison_filter(p)) {
1442 if (TestClearPageHWPoison(head))
0ed950d1 1443 num_poisoned_pages_dec();
9617b9bb
NH
1444 unlock_page(head);
1445 return 0;
761ad8d7 1446 }
0ed950d1
NH
1447 unlock_page(head);
1448 res = MF_FAILED;
510d25c9 1449 if (__page_handle_poison(p)) {
0ed950d1
NH
1450 page_ref_inc(p);
1451 res = MF_RECOVERED;
1452 }
1453 action_result(pfn, MF_MSG_FREE_HUGE, res);
1454 return res == MF_RECOVERED ? 0 : -EBUSY;
1455 } else if (res < 0) {
1456 action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
1457 return -EBUSY;
761ad8d7 1458 }
761ad8d7
NH
1459 }
1460
1461 lock_page(head);
1462 page_flags = head->flags;
1463
1464 if (!PageHWPoison(head)) {
1465 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1466 num_poisoned_pages_dec();
1467 unlock_page(head);
dd6e2402 1468 put_page(head);
761ad8d7
NH
1469 return 0;
1470 }
1471
31286a84
NH
1472 /*
1473 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
1474 * simply disable it. In order to make it work properly, we need
1475 * make sure that:
1476 * - conversion of a pud that maps an error hugetlb into hwpoison
1477 * entry properly works, and
1478 * - other mm code walking over page table is aware of pud-aligned
1479 * hwpoison entries.
1480 */
1481 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1482 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1483 res = -EBUSY;
1484 goto out;
1485 }
1486
ed8c2f49 1487 if (!hwpoison_user_mappings(p, pfn, flags, head)) {
761ad8d7
NH
1488 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1489 res = -EBUSY;
1490 goto out;
1491 }
1492
ea6d0630 1493 return identify_page_state(pfn, p, page_flags);
761ad8d7
NH
1494out:
1495 unlock_page(head);
1496 return res;
1497}
1498
6100e34b
DW
1499static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1500 struct dev_pagemap *pgmap)
1501{
1502 struct page *page = pfn_to_page(pfn);
6100e34b
DW
1503 unsigned long size = 0;
1504 struct to_kill *tk;
1505 LIST_HEAD(tokill);
1506 int rc = -EBUSY;
1507 loff_t start;
27359fd6 1508 dax_entry_t cookie;
6100e34b 1509
1e8aaedb
OS
1510 if (flags & MF_COUNT_INCREASED)
1511 /*
1512 * Drop the extra refcount in case we come from madvise().
1513 */
1514 put_page(page);
1515
34dc45be
DW
1516 /* device metadata space is not recoverable */
1517 if (!pgmap_pfn_valid(pgmap, pfn)) {
1518 rc = -ENXIO;
1519 goto out;
1520 }
1521
6100e34b
DW
1522 /*
1523 * Prevent the inode from being freed while we are interrogating
1524 * the address_space, typically this would be handled by
1525 * lock_page(), but dax pages do not use the page lock. This
1526 * also prevents changes to the mapping of this pfn until
1527 * poison signaling is complete.
1528 */
27359fd6
MW
1529 cookie = dax_lock_page(page);
1530 if (!cookie)
6100e34b
DW
1531 goto out;
1532
1533 if (hwpoison_filter(page)) {
1534 rc = 0;
1535 goto unlock;
1536 }
1537
25b2995a 1538 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
6100e34b
DW
1539 /*
1540 * TODO: Handle HMM pages which may need coordination
1541 * with device-side memory.
1542 */
1543 goto unlock;
6100e34b
DW
1544 }
1545
1546 /*
1547 * Use this flag as an indication that the dax page has been
1548 * remapped UC to prevent speculative consumption of poison.
1549 */
1550 SetPageHWPoison(page);
1551
1552 /*
1553 * Unlike System-RAM there is no possibility to swap in a
1554 * different physical page at a given virtual address, so all
1555 * userspace consumption of ZONE_DEVICE memory necessitates
1556 * SIGBUS (i.e. MF_MUST_KILL)
1557 */
1558 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1559 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1560
1561 list_for_each_entry(tk, &tokill, nd)
1562 if (tk->size_shift)
1563 size = max(size, 1UL << tk->size_shift);
1564 if (size) {
1565 /*
1566 * Unmap the largest mapping to avoid breaking up
1567 * device-dax mappings which are constant size. The
1568 * actual size of the mapping being torn down is
1569 * communicated in siginfo, see kill_proc()
1570 */
1571 start = (page->index << PAGE_SHIFT) & ~(size - 1);
4d75136b 1572 unmap_mapping_range(page->mapping, start, size, 0);
6100e34b 1573 }
ae611d07 1574 kill_procs(&tokill, flags & MF_MUST_KILL, false, pfn, flags);
6100e34b
DW
1575 rc = 0;
1576unlock:
27359fd6 1577 dax_unlock_page(page, cookie);
6100e34b
DW
1578out:
1579 /* drop pgmap ref acquired in caller */
1580 put_dev_pagemap(pgmap);
1581 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1582 return rc;
1583}
1584
cd42f4a3
TL
1585/**
1586 * memory_failure - Handle memory failure of a page.
1587 * @pfn: Page Number of the corrupted page
cd42f4a3
TL
1588 * @flags: fine tune action taken
1589 *
1590 * This function is called by the low level machine check code
1591 * of an architecture when it detects hardware memory corruption
1592 * of a page. It tries its best to recover, which includes
1593 * dropping pages, killing processes etc.
1594 *
1595 * The function is primarily of use for corruptions that
1596 * happen outside the current execution context (e.g. when
1597 * detected by a background scrubber)
1598 *
1599 * Must run in process context (e.g. a work queue) with interrupts
1600 * enabled and no spinlocks held.
1601 */
83b57531 1602int memory_failure(unsigned long pfn, int flags)
6a46079c 1603{
6a46079c 1604 struct page *p;
7af446a8 1605 struct page *hpage;
415c64c1 1606 struct page *orig_head;
6100e34b 1607 struct dev_pagemap *pgmap;
171936dd 1608 int res = 0;
524fca1e 1609 unsigned long page_flags;
a8b2c2ce 1610 bool retry = true;
171936dd 1611 static DEFINE_MUTEX(mf_mutex);
6a46079c
AK
1612
1613 if (!sysctl_memory_failure_recovery)
83b57531 1614 panic("Memory failure on page %lx", pfn);
6a46079c 1615
96c804a6
DH
1616 p = pfn_to_online_page(pfn);
1617 if (!p) {
1618 if (pfn_valid(pfn)) {
1619 pgmap = get_dev_pagemap(pfn, NULL);
1620 if (pgmap)
1621 return memory_failure_dev_pagemap(pfn, flags,
1622 pgmap);
1623 }
495367c0
CY
1624 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1625 pfn);
a7560fc8 1626 return -ENXIO;
6a46079c
AK
1627 }
1628
171936dd
TL
1629 mutex_lock(&mf_mutex);
1630
a8b2c2ce 1631try_again:
171936dd
TL
1632 if (PageHuge(p)) {
1633 res = memory_failure_hugetlb(pfn, flags);
1634 goto unlock_mutex;
1635 }
1636
6a46079c 1637 if (TestSetPageHWPoison(p)) {
495367c0
CY
1638 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1639 pfn);
47af12ba 1640 res = -EHWPOISON;
a3f5d80e
NH
1641 if (flags & MF_ACTION_REQUIRED)
1642 res = kill_accessing_process(current, pfn, flags);
171936dd 1643 goto unlock_mutex;
6a46079c
AK
1644 }
1645
761ad8d7 1646 orig_head = hpage = compound_head(p);
b37ff71c 1647 num_poisoned_pages_inc();
6a46079c
AK
1648
1649 /*
1650 * We need/can do nothing about count=0 pages.
1651 * 1) it's a free page, and therefore in safe hands:
1652 * prep_new_page() will be the gate keeper.
761ad8d7 1653 * 2) it's part of a non-compound high order page.
6a46079c
AK
1654 * Implies some kernel user: cannot stop them from
1655 * R/W the page; let's pray that the page has been
1656 * used and will be freed some time later.
1657 * In fact it's dangerous to directly bump up page count from 0,
1c4c3b99 1658 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
6a46079c 1659 */
0ed950d1
NH
1660 if (!(flags & MF_COUNT_INCREASED)) {
1661 res = get_hwpoison_page(p, flags);
1662 if (!res) {
1663 if (is_free_buddy_page(p)) {
1664 if (take_page_off_buddy(p)) {
1665 page_ref_inc(p);
1666 res = MF_RECOVERED;
1667 } else {
1668 /* We lost the race, try again */
1669 if (retry) {
1670 ClearPageHWPoison(p);
1671 num_poisoned_pages_dec();
1672 retry = false;
1673 goto try_again;
1674 }
1675 res = MF_FAILED;
a8b2c2ce 1676 }
0ed950d1
NH
1677 action_result(pfn, MF_MSG_BUDDY, res);
1678 res = res == MF_RECOVERED ? 0 : -EBUSY;
1679 } else {
1680 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1681 res = -EBUSY;
a8b2c2ce 1682 }
0ed950d1
NH
1683 goto unlock_mutex;
1684 } else if (res < 0) {
1685 action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
171936dd 1686 res = -EBUSY;
0ed950d1 1687 goto unlock_mutex;
8d22ba1b 1688 }
6a46079c
AK
1689 }
1690
761ad8d7 1691 if (PageTransHuge(hpage)) {
eac96c3e
YS
1692 /*
 1693 * The flag must be set after the refcount is bumped,
 1694 * otherwise it may race with THP split.
 1695 * And the flag can't be set in get_hwpoison_page() since
 1696 * it is also called by soft offline, and it is only called
 1697 * for !MF_COUNT_INCREASED. So this seems to be the best
 1698 * place to set it.
 1699 *
 1700 * There is no need to care about the above error handling paths of
 1701 * get_hwpoison_page(), since they handle either a free page
 1702 * or an unhandlable page. The refcount is bumped iff the
 1703 * page is a valid handlable page.
1704 */
1705 SetPageHasHWPoisoned(hpage);
5d1fd5dc
NH
1706 if (try_to_split_thp_page(p, "Memory Failure") < 0) {
1707 action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
171936dd
TL
1708 res = -EBUSY;
1709 goto unlock_mutex;
5d1fd5dc 1710 }
415c64c1 1711 VM_BUG_ON_PAGE(!page_count(p), p);
415c64c1
NH
1712 }
1713
e43c3afb
WF
1714 /*
1715 * We ignore non-LRU pages for good reasons.
1716 * - PG_locked is only well defined for LRU pages and a few others
48c935ad 1717 * - to avoid races with __SetPageLocked()
e43c3afb
WF
1718 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1719 * The check (unnecessarily) ignores LRU pages being isolated and
 1720 * walked by the page reclaim code, but that's not a big loss.
1721 */
d0505e9f 1722 shake_page(p);
e43c3afb 1723
761ad8d7 1724 lock_page(p);
847ce401 1725
f37d4298
AK
1726 /*
 1727 * The page could have become part of a different compound page
 1728 * while we were taking the lock. If this happens, just bail out.
1729 */
415c64c1 1730 if (PageCompound(p) && compound_head(p) != orig_head) {
cc637b17 1731 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
f37d4298 1732 res = -EBUSY;
171936dd 1733 goto unlock_page;
f37d4298
AK
1734 }
1735
524fca1e
NH
1736 /*
1737 * We use page flags to determine what action should be taken, but
1738 * the flags can be modified by the error containment action. One
1739 * example is an mlocked page, where PG_mlocked is cleared by
1740 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1741 * correctly, we save a copy of the page flags at this time.
1742 */
7d9d46ac 1743 page_flags = p->flags;
524fca1e 1744
847ce401
WF
1745 /*
 1746 * unpoison always clears PG_hwpoison inside the page lock
1747 */
1748 if (!PageHWPoison(p)) {
495367c0 1749 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
b37ff71c 1750 num_poisoned_pages_dec();
761ad8d7 1751 unlock_page(p);
dd6e2402 1752 put_page(p);
171936dd 1753 goto unlock_mutex;
847ce401 1754 }
7c116f2b
WF
1755 if (hwpoison_filter(p)) {
1756 if (TestClearPageHWPoison(p))
b37ff71c 1757 num_poisoned_pages_dec();
761ad8d7 1758 unlock_page(p);
dd6e2402 1759 put_page(p);
171936dd 1760 goto unlock_mutex;
7c116f2b 1761 }
847ce401 1762
e8675d29 1763 /*
 1764 * __munlock_pagevec may clear a writeback page's LRU flag without
 1765 * the page lock. We need to wait for writeback completion on this page
 1766 * or it may trigger a VFS BUG while evicting the inode.
1767 */
1768 if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
0bc1f8b0
CY
1769 goto identify_page_state;
1770
6edd6cc6
NH
1771 /*
1772 * It's very difficult to mess with pages currently under IO
1773 * and in many cases impossible, so we just avoid it here.
1774 */
6a46079c
AK
1775 wait_on_page_writeback(p);
1776
1777 /*
1778 * Now take care of user space mappings.
e64a782f 1779 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
6a46079c 1780 */
ed8c2f49 1781 if (!hwpoison_user_mappings(p, pfn, flags, p)) {
cc637b17 1782 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1668bfd5 1783 res = -EBUSY;
171936dd 1784 goto unlock_page;
1668bfd5 1785 }
6a46079c
AK
1786
1787 /*
1788 * Torn down by someone else?
1789 */
dc2a1cbf 1790 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
cc637b17 1791 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
d95ea51e 1792 res = -EBUSY;
171936dd 1793 goto unlock_page;
6a46079c
AK
1794 }
1795
0bc1f8b0 1796identify_page_state:
0348d2eb 1797 res = identify_page_state(pfn, p, page_flags);
ea6d0630
NH
1798 mutex_unlock(&mf_mutex);
1799 return res;
171936dd 1800unlock_page:
761ad8d7 1801 unlock_page(p);
171936dd
TL
1802unlock_mutex:
1803 mutex_unlock(&mf_mutex);
6a46079c
AK
1804 return res;
1805}
cd42f4a3 1806EXPORT_SYMBOL_GPL(memory_failure);
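/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * mm/memory-failure.c. It shows one way low level machine check code could
 * defer to process context (a work item) before calling memory_failure(),
 * as the comment above requires. The example_mce_* names are hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
struct example_mce_work {
	struct work_struct work;
	unsigned long pfn;
};

static void example_mce_work_func(struct work_struct *work)
{
	struct example_mce_work *mw =
		container_of(work, struct example_mce_work, work);
	int ret;

	/* Process context, interrupts enabled, no spinlocks held. */
	ret = memory_failure(mw->pfn, MF_ACTION_REQUIRED);
	if (ret)
		pr_err("example: recovery of pfn %#lx failed: %d\n",
		       mw->pfn, ret);
	kfree(mw);
}
#endif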
847ce401 1807
ea8f5fb8
HY
1808#define MEMORY_FAILURE_FIFO_ORDER 4
1809#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1810
1811struct memory_failure_entry {
1812 unsigned long pfn;
ea8f5fb8
HY
1813 int flags;
1814};
1815
1816struct memory_failure_cpu {
1817 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1818 MEMORY_FAILURE_FIFO_SIZE);
1819 spinlock_t lock;
1820 struct work_struct work;
1821};
1822
1823static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1824
1825/**
1826 * memory_failure_queue - Schedule handling memory failure of a page.
1827 * @pfn: Page Number of the corrupted page
ea8f5fb8
HY
1828 * @flags: Flags for memory failure handling
1829 *
1830 * This function is called by the low level hardware error handler
1831 * when it detects hardware memory corruption of a page. It schedules
 1832 * recovery of the error page, including dropping pages, killing
1833 * processes etc.
1834 *
1835 * The function is primarily of use for corruptions that
1836 * happen outside the current execution context (e.g. when
 1837 * detected by a background scrubber).
1838 *
1839 * Can run in IRQ context.
1840 */
83b57531 1841void memory_failure_queue(unsigned long pfn, int flags)
ea8f5fb8
HY
1842{
1843 struct memory_failure_cpu *mf_cpu;
1844 unsigned long proc_flags;
1845 struct memory_failure_entry entry = {
1846 .pfn = pfn,
ea8f5fb8
HY
1847 .flags = flags,
1848 };
1849
1850 mf_cpu = &get_cpu_var(memory_failure_cpu);
1851 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
498d319b 1852 if (kfifo_put(&mf_cpu->fifo, entry))
ea8f5fb8
HY
1853 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1854 else
8e33a52f 1855 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
ea8f5fb8
HY
1856 pfn);
1857 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1858 put_cpu_var(memory_failure_cpu);
1859}
1860EXPORT_SYMBOL_GPL(memory_failure_queue);
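/*
 * Editor's note: illustrative sketch, not part of this file. In contrast to
 * memory_failure(), memory_failure_queue() can be called from IRQ context,
 * so a platform error handler can simply queue the pfn and return; the heavy
 * lifting then happens from the per-CPU work item below.
 * example_report_bad_pfn() is a hypothetical name.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_report_bad_pfn(unsigned long pfn, bool action_required)
{
	int flags = action_required ? MF_ACTION_REQUIRED : 0;

	/* Safe in IRQ context; memory_failure() itself runs later. */
	memory_failure_queue(pfn, flags);
}
#endif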
1861
1862static void memory_failure_work_func(struct work_struct *work)
1863{
1864 struct memory_failure_cpu *mf_cpu;
1865 struct memory_failure_entry entry = { 0, };
1866 unsigned long proc_flags;
1867 int gotten;
1868
06202231 1869 mf_cpu = container_of(work, struct memory_failure_cpu, work);
ea8f5fb8
HY
1870 for (;;) {
1871 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1872 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1873 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1874 if (!gotten)
1875 break;
cf870c70 1876 if (entry.flags & MF_SOFT_OFFLINE)
feec24a6 1877 soft_offline_page(entry.pfn, entry.flags);
cf870c70 1878 else
83b57531 1879 memory_failure(entry.pfn, entry.flags);
ea8f5fb8
HY
1880 }
1881}
1882
06202231
JM
1883/*
1884 * Process memory_failure work queued on the specified CPU.
1885 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
1886 */
1887void memory_failure_queue_kick(int cpu)
1888{
1889 struct memory_failure_cpu *mf_cpu;
1890
1891 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1892 cancel_work_sync(&mf_cpu->work);
1893 memory_failure_work_func(&mf_cpu->work);
1894}
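/*
 * Editor's note: illustrative sketch, not part of this file. An architecture
 * that queued a pfn from its error handler can drain the per-CPU queue
 * synchronously before returning to user space, so the task cannot touch the
 * poisoned page again first. example_mf_drain_on_cpu() is a hypothetical name.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_mf_drain_on_cpu(int cpu)
{
	/*
	 * Process context only: memory_failure_queue_kick() uses
	 * cancel_work_sync(), which may sleep.
	 */
	memory_failure_queue_kick(cpu);
}
#endif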
1895
ea8f5fb8
HY
1896static int __init memory_failure_init(void)
1897{
1898 struct memory_failure_cpu *mf_cpu;
1899 int cpu;
1900
1901 for_each_possible_cpu(cpu) {
1902 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1903 spin_lock_init(&mf_cpu->lock);
1904 INIT_KFIFO(mf_cpu->fifo);
1905 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1906 }
1907
1908 return 0;
1909}
1910core_initcall(memory_failure_init);
1911
a5f65109
NH
1912#define unpoison_pr_info(fmt, pfn, rs) \
1913({ \
1914 if (__ratelimit(rs)) \
1915 pr_info(fmt, pfn); \
1916})
1917
847ce401
WF
1918/**
1919 * unpoison_memory - Unpoison a previously poisoned page
 1920 * @pfn: Page number of the page to be unpoisoned
1921 *
1922 * Software-unpoison a page that has been poisoned by
1923 * memory_failure() earlier.
1924 *
 1925 * This is only done at the software level, so it only works
 1926 * for failures injected by Linux, not for real hardware failures.
1927 *
1928 * Returns 0 for success, otherwise -errno.
1929 */
1930int unpoison_memory(unsigned long pfn)
1931{
1932 struct page *page;
1933 struct page *p;
1934 int freeit = 0;
2f714160 1935 unsigned long flags = 0;
a5f65109
NH
1936 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1937 DEFAULT_RATELIMIT_BURST);
847ce401
WF
1938
1939 if (!pfn_valid(pfn))
1940 return -ENXIO;
1941
1942 p = pfn_to_page(pfn);
1943 page = compound_head(p);
1944
1945 if (!PageHWPoison(p)) {
495367c0 1946 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
a5f65109 1947 pfn, &unpoison_rs);
847ce401
WF
1948 return 0;
1949 }
1950
230ac719 1951 if (page_count(page) > 1) {
495367c0 1952 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
a5f65109 1953 pfn, &unpoison_rs);
230ac719
NH
1954 return 0;
1955 }
1956
1957 if (page_mapped(page)) {
495367c0 1958 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
a5f65109 1959 pfn, &unpoison_rs);
230ac719
NH
1960 return 0;
1961 }
1962
1963 if (page_mapping(page)) {
495367c0 1964 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
a5f65109 1965 pfn, &unpoison_rs);
230ac719
NH
1966 return 0;
1967 }
1968
0cea3fdc
WL
1969 /*
 1970 * unpoison_memory() can encounter a thp only while the thp is being
 1971 * handled by memory_failure() and the page lock is not held yet.
 1972 * In that case, we yield to memory_failure() and make unpoison fail.
1973 */
e76d30e2 1974 if (!PageHuge(page) && PageTransHuge(page)) {
495367c0 1975 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
a5f65109 1976 pfn, &unpoison_rs);
ead07f6a 1977 return 0;
0cea3fdc
WL
1978 }
1979
0ed950d1 1980 if (!get_hwpoison_page(p, flags)) {
847ce401 1981 if (TestClearPageHWPoison(p))
8e30456b 1982 num_poisoned_pages_dec();
495367c0 1983 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
a5f65109 1984 pfn, &unpoison_rs);
847ce401
WF
1985 return 0;
1986 }
1987
7eaceacc 1988 lock_page(page);
847ce401
WF
1989 /*
 1990 * This test is racy because PG_hwpoison is set outside of the page lock.
 1991 * That's acceptable because it won't trigger a kernel panic. Instead,
 1992 * the PG_hwpoison page will be caught and isolated at the entrance to
 1993 * the free buddy page pool.
1994 */
c9fbdd5f 1995 if (TestClearPageHWPoison(page)) {
495367c0 1996 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
a5f65109 1997 pfn, &unpoison_rs);
b37ff71c 1998 num_poisoned_pages_dec();
847ce401
WF
1999 freeit = 1;
2000 }
2001 unlock_page(page);
2002
dd6e2402 2003 put_page(page);
3ba5eebc 2004 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
dd6e2402 2005 put_page(page);
847ce401
WF
2006
2007 return 0;
2008}
2009EXPORT_SYMBOL(unpoison_memory);
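/*
 * Editor's note: illustrative sketch, not part of this file. Because
 * unpoison only reverses software-injected poison, its typical caller is a
 * debugfs-style injector hook along these lines; example_unpoison_pfn() is
 * a hypothetical name.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_unpoison_pfn(void *data, u64 val)
{
	unsigned long pfn = val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return unpoison_memory(pfn);
}
#endif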
facb6011 2010
6b9a217e 2011static bool isolate_page(struct page *page, struct list_head *pagelist)
d950b958 2012{
6b9a217e
OS
2013 bool isolated = false;
2014 bool lru = PageLRU(page);
d950b958 2015
6b9a217e
OS
2016 if (PageHuge(page)) {
2017 isolated = isolate_huge_page(page, pagelist);
2018 } else {
2019 if (lru)
2020 isolated = !isolate_lru_page(page);
2021 else
2022 isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);
2023
2024 if (isolated)
2025 list_add(&page->lru, pagelist);
0ebff32c 2026 }
d950b958 2027
6b9a217e
OS
2028 if (isolated && lru)
2029 inc_node_page_state(page, NR_ISOLATED_ANON +
2030 page_is_file_lru(page));
2031
03613808 2032 /*
6b9a217e
OS
 2033 * If we succeeded in isolating the page, we grabbed another refcount on
 2034 * the page, so we can safely drop the one we got from get_any_page().
 2035 * If we failed to isolate the page, it means that we cannot go further
 2036 * and we will return an error, so drop the reference we got from
 2037 * get_any_page() as well.
03613808 2038 */
6b9a217e
OS
2039 put_page(page);
2040 return isolated;
d950b958
NH
2041}
2042
6b9a217e
OS
2043/*
2044 * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
 2045 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2046 * If the page is mapped, it migrates the contents over.
2047 */
2048static int __soft_offline_page(struct page *page)
af8fae7c 2049{
6b9a217e 2050 int ret = 0;
af8fae7c 2051 unsigned long pfn = page_to_pfn(page);
6b9a217e
OS
2052 struct page *hpage = compound_head(page);
2053 char const *msg_page[] = {"page", "hugepage"};
2054 bool huge = PageHuge(page);
2055 LIST_HEAD(pagelist);
54608759
JK
2056 struct migration_target_control mtc = {
2057 .nid = NUMA_NO_NODE,
2058 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2059 };
facb6011 2060
facb6011 2061 /*
af8fae7c
NH
 2062 * Check PageHWPoison again inside the page lock because PageHWPoison
 2063 * is set by memory_failure() outside the page lock. Note that
2064 * memory_failure() also double-checks PageHWPoison inside page lock,
2065 * so there's no race between soft_offline_page() and memory_failure().
facb6011 2066 */
0ebff32c 2067 lock_page(page);
6b9a217e
OS
2068 if (!PageHuge(page))
2069 wait_on_page_writeback(page);
af8fae7c
NH
2070 if (PageHWPoison(page)) {
2071 unlock_page(page);
dd6e2402 2072 put_page(page);
af8fae7c 2073 pr_info("soft offline: %#lx page already poisoned\n", pfn);
5a2ffca3 2074 return 0;
af8fae7c 2075 }
6b9a217e
OS
2076
2077 if (!PageHuge(page))
2078 /*
2079 * Try to invalidate first. This should work for
2080 * non dirty unmapped page cache pages.
2081 */
2082 ret = invalidate_inode_page(page);
facb6011 2083 unlock_page(page);
6b9a217e 2084
facb6011 2085 /*
facb6011
AK
 2086 * RED-PEN: it would be better to keep it isolated here, but we
2087 * would need to fix isolation locking first.
2088 */
6b9a217e 2089 if (ret) {
fb46e735 2090 pr_info("soft_offline: %#lx: invalidated\n", pfn);
6b9a217e 2091 page_handle_poison(page, false, true);
af8fae7c 2092 return 0;
facb6011
AK
2093 }
2094
6b9a217e 2095 if (isolate_page(hpage, &pagelist)) {
54608759 2096 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
5ac95884 2097 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
79f5f8fa 2098 if (!ret) {
6b9a217e
OS
2099 bool release = !huge;
2100
2101 if (!page_handle_poison(page, huge, release))
2102 ret = -EBUSY;
79f5f8fa 2103 } else {
85fbe5d1
YX
2104 if (!list_empty(&pagelist))
2105 putback_movable_pages(&pagelist);
59c82b70 2106
6b9a217e
OS
2107 pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
2108 pfn, msg_page[huge], ret, page->flags, &page->flags);
facb6011 2109 if (ret > 0)
3f4b815a 2110 ret = -EBUSY;
facb6011
AK
2111 }
2112 } else {
3f4b815a
OS
2113 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
2114 pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
6b9a217e 2115 ret = -EBUSY;
facb6011 2116 }
facb6011
AK
2117 return ret;
2118}
86e05773 2119
6b9a217e 2120static int soft_offline_in_use_page(struct page *page)
acc14dc4 2121{
acc14dc4
NH
2122 struct page *hpage = compound_head(page);
2123
694bf0b0
OS
2124 if (!PageHuge(page) && PageTransHuge(hpage))
2125 if (try_to_split_thp_page(page, "soft offline") < 0)
acc14dc4 2126 return -EBUSY;
6b9a217e 2127 return __soft_offline_page(page);
acc14dc4
NH
2128}
2129
d4ae9916 2130static int soft_offline_free_page(struct page *page)
acc14dc4 2131{
6b9a217e 2132 int rc = 0;
acc14dc4 2133
6b9a217e
OS
2134 if (!page_handle_poison(page, true, false))
2135 rc = -EBUSY;
06be6ff3 2136
d4ae9916 2137 return rc;
acc14dc4
NH
2138}
2139
dad4e5b3
DW
2140static void put_ref_page(struct page *page)
2141{
2142 if (page)
2143 put_page(page);
2144}
2145
86e05773
WL
2146/**
2147 * soft_offline_page - Soft offline a page.
feec24a6 2148 * @pfn: pfn to soft-offline
86e05773
WL
2149 * @flags: flags. Same as memory_failure().
2150 *
2151 * Returns 0 on success, otherwise negated errno.
2152 *
2153 * Soft offline a page, by migration or invalidation,
2154 * without killing anything. This is for the case when
2155 * a page is not corrupted yet (so it's still valid to access),
2156 * but has had a number of corrected errors and is better taken
2157 * out.
2158 *
2159 * The actual policy on when to do that is maintained by
2160 * user space.
2161 *
2162 * This should never impact any application or cause data loss,
 2163 * but it might take some time.
2164 *
2165 * This is not a 100% solution for all memory, but tries to be
2166 * ``good enough'' for the majority of memory.
2167 */
feec24a6 2168int soft_offline_page(unsigned long pfn, int flags)
86e05773
WL
2169{
2170 int ret;
b94e0282 2171 bool try_again = true;
dad4e5b3
DW
2172 struct page *page, *ref_page = NULL;
2173
2174 WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
86e05773 2175
feec24a6
NH
2176 if (!pfn_valid(pfn))
2177 return -ENXIO;
dad4e5b3
DW
2178 if (flags & MF_COUNT_INCREASED)
2179 ref_page = pfn_to_page(pfn);
2180
feec24a6
NH
2181 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2182 page = pfn_to_online_page(pfn);
dad4e5b3
DW
2183 if (!page) {
2184 put_ref_page(ref_page);
86a66810 2185 return -EIO;
dad4e5b3 2186 }
86a66810 2187
86e05773 2188 if (PageHWPoison(page)) {
8295d535 2189 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
dad4e5b3 2190 put_ref_page(ref_page);
5a2ffca3 2191 return 0;
86e05773 2192 }
86e05773 2193
b94e0282 2194retry:
bfc8c901 2195 get_online_mems();
0ed950d1 2196 ret = get_hwpoison_page(page, flags);
bfc8c901 2197 put_online_mems();
4e41a30c 2198
8295d535 2199 if (ret > 0) {
6b9a217e 2200 ret = soft_offline_in_use_page(page);
8295d535 2201 } else if (ret == 0) {
b94e0282
OS
2202 if (soft_offline_free_page(page) && try_again) {
2203 try_again = false;
49612dd0 2204 flags &= ~MF_COUNT_INCREASED;
b94e0282
OS
2205 goto retry;
2206 }
8295d535 2207 }
4e41a30c 2208
86e05773
WL
2209 return ret;
2210}
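/*
 * Editor's note: illustrative sketch, not part of this file. It shows the
 * kind of policy caller described above: once a pfn has accumulated enough
 * corrected errors, try to migrate its contents away and take the page out
 * of service. The example_* names and threshold are hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
#define EXAMPLE_CE_THRESHOLD	16

static void example_handle_corrected_error(unsigned long pfn,
					    unsigned int ce_count)
{
	if (ce_count < EXAMPLE_CE_THRESHOLD)
		return;

	/* No MF_COUNT_INCREASED: we do not hold an extra page reference. */
	if (soft_offline_page(pfn, 0))
		pr_warn("example: could not soft offline pfn %#lx\n", pfn);
}
#endif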