// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of pages that are suspected (but not yet confirmed) to be corrupted,
 * without killing anything.
 *
 * Handles page cache pages in various states. The tricky part here is that
 * we can access any page asynchronously with respect to other VM users,
 * because memory failures can happen at any time and anywhere. This could
 * violate some of their assumptions, which is why this code has to be
 * extremely careful. Generally it tries to use normal locking rules, i.e.
 * take the standard locks, even if that means the error handling takes
 * potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * scales non-linearly with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include "internal.h"
#include "ras/ras_event.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
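/*
 * Note: the two sysctls above are normally exposed to user space as
 * /proc/sys/vm/memory_failure_early_kill and
 * /proc/sys/vm/memory_failure_recovery; see the vm sysctl documentation.
 */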
static bool __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret > 0;
}
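/*
 * page_handle_poison() below relies on __page_handle_poison() returning
 * true only when the page has really been removed from the allocator
 * (hugepage dissolved or raw page taken off the buddy free list).
 */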
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since dissolve_free_huge_page
		 * returns 0 for non-hugetlb pages as well.
		 */
		if (!__page_handle_poison(page))
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because the soft-offlined page is not broken
			 * and if someone really wants to use it, they should
			 * put it back.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc();

	return true;
}
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
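/*
 * These filter knobs are intended for the hwpoison-inject test module; they
 * are typically driven through the hwpoison debugfs interface
 * (/sys/kernel/debug/hwpoison/) when CONFIG_HWPOISON_INJECT is enabled.
 */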
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}
/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. At last, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);
/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};
/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
			pfn, t->comm, t->pid);

	if (flags & MF_ACTION_REQUIRED) {
		if (t == current)
			ret = force_sig_mceerr(BUS_MCEERR_AR,
					(void __user *)tk->addr, addr_lsb);
		else
			/* Signal other processes sharing the page if they have PF_MCE_EARLY set. */
			ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
					addr_lsb, t);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);  /* synchronous? */
	}
	if (ret < 0)
		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}
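/*
 * For both the AR and AO cases above, the SIGBUS siginfo carries the faulting
 * user address in si_addr and the mapping granularity as a power of two in
 * si_addr_lsb (e.g. PAGE_SHIFT for a 4K mapping), so a recovery-aware
 * application can tell how much of its address space was affected.
 */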
/*
 * Unknown page type encountered. Try to check whether it can be turned into
 * a PageLRU page via lru_add_drain_all(), or into a free page by reclaiming
 * slabs when possible.
 */
void shake_page(struct page *p, int access)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_node_slabs here (which would also shrink
	 * other caches) if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);
static unsigned long dev_pagemap_mapping_shift(struct page *page,
				struct vm_area_struct *vma)
{
	unsigned long address = vma_address(page, vma);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		return 0;
	if (pte_devmap(*pte))
		return PAGE_SHIFT;
	return 0;
}
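/*
 * The shift returned above (PAGE_SHIFT, PMD_SHIFT or PUD_SHIFT, or 0 when no
 * mapping was found) becomes tk->size_shift in add_to_kill() and is
 * ultimately reported to user space as si_addr_lsb.
 */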
/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Memory failure: Out of memory while machine check handling\n");
		return;
	}

	tk->addr = page_address_in_vma(p, vma);
	if (is_zone_device_page(p))
		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
	else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * "tk->size_shift == 0" effectively checks for no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}
/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}
/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill" and otherwise return NULL.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to
 * be signaled with SIGBUS; the error is Action Optional for other non-current
 * processes sharing the same error page. If such a process is "early kill",
 * the task_struct of its dedicated thread will also be returned.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}
/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}
/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}
/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	if (!page->mapping)
		return;

	if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}
struct hwp_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = hwpoison_entry_to_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	return 0;
}
#endif
static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	int ret = 0;
	pte_t *ptep;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmdp))
		goto out;

	ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	cond_resched();
	return ret;
}
#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
				  unsigned long addr, unsigned long end,
				  struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static struct mm_walk_ops hwp_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
};
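/*
 * hwp_walk_ops is only used by kill_accessing_process() below to locate the
 * virtual address that maps the poisoned pfn in the faulting process, so that
 * the SIGBUS it sends can carry a meaningful si_addr.
 */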
/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * MCE handler currently has no easy access to the error virtual address,
 * so this function walks the page table to find it. The returned virtual
 * address is proper in most cases, but it could be wrong when the application
 * process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwp_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	mmap_read_unlock(p->mm);
	return ret ? -EFAULT : -EHWPOISON;
}
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_NON_PMD_HUGE]		= "non-pmd-sized huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_BUDDY_2ND]		= "free buddy page (2nd try)",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_UNKNOWN]		= "unknown page",
};
/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(p);

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}
static int truncate_error_page(struct page *p, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0) {
			pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
				pfn, err);
		} else if (page_has_private(p) &&
			   !try_to_release_page(p, GFP_NOIO)) {
			pr_info("Memory failure: %#lx: failed to release buffers\n",
				pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("Memory failure: %#lx: Failed to invalidate\n",
				pfn);
	}

	return ret;
}
/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
	unlock_page(p);
	return MF_FAILED;
}
/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int ret;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile.
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_page(p, pfn, mapping);
out:
	unlock_page(p);
	return ret;
}
/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page, the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors,
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling, the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(p, pfn);
}
/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	int ret;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);
	return ret;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	int ret;

	delete_from_swap_cache(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	unlock_page(p);
	return ret;
}
/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, pfn, mapping);
		unlock_page(hpage);
	} else {
		res = MF_FAILED;
		unlock_page(hpage);
		/*
		 * migration entry prevents later access on error anonymous
		 * hugepage, so we can free and dissolve it into buddy to
		 * save healthy subpages.
		 */
		put_page(hpage);
		if (__page_handle_poison(p)) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		}
	}

	return res;
}
/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef slab
#undef reserved
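/*
 * error_states is scanned linearly and the first matching entry wins (see
 * identify_page_state()), so more specific mask/result pairs must stay above
 * the catchall MF_MSG_UNKNOWN entry.
 */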
1083 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1084 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1086 static void action_result(unsigned long pfn
, enum mf_action_page_type type
,
1087 enum mf_result result
)
1089 trace_memory_failure_event(pfn
, type
, result
);
1091 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
1092 pfn
, action_page_types
[type
], action_name
[result
]);
1095 static int page_action(struct page_state
*ps
, struct page
*p
,
1101 /* page p should be unlocked after returning from ps->action(). */
1102 result
= ps
->action(p
, pfn
);
1104 count
= page_count(p
) - 1;
1105 if (ps
->action
== me_swapcache_dirty
&& result
== MF_DELAYED
)
1108 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
1109 pfn
, action_page_types
[ps
->type
], count
);
1112 action_result(pfn
, ps
->type
, result
);
1114 /* Could do more checks here if page looks ok */
1116 * Could adjust zone counters here to correct for the missing page.
1119 return (result
== MF_RECOVERED
|| result
== MF_DELAYED
) ? 0 : -EBUSY
;
/*
 * Return true if a page type of a given page is supported by hwpoison
 * mechanism (while handling could fail), otherwise false. This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in the context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page)
{
	return PageLRU(page) || __PageMovable(page);
}

static int __get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * This check prevents from calling get_hwpoison_unless_zero()
	 * for any unsupported type of page in order to reduce the risk of
	 * unexpected races caused by taking a page refcount.
	 */
	if (!HWPoisonHandlable(head))
		return -EBUSY;

	if (PageTransHuge(head)) {
		/*
		 * Non anonymous thp exists only in allocation/free time. We
		 * can't handle such a case correctly, so let's give it up.
		 * This should be better than triggering BUG_ON when kernel
		 * tries to touch the "partially handled" page.
		 */
		if (!PageAnon(head)) {
			pr_err("Memory failure: %#lx: non anonymous thp\n",
			       page_to_pfn(page));
			return 0;
		}
	}

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("Memory failure: %#lx cannot catch tail\n",
			page_to_pfn(page));
		put_page(head);
	}

	return 0;
}
static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p, 1);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p, 1);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	return ret;
}
/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page-type we can successfully handle the memory error on it,
 * such as LRU page and hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with typical memory management lifecycle (like
 * allocation and free). So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
			pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (!PageHuge(hpage)) {
		try_to_unmap(hpage, ttu);
	} else {
		if (!PageAnon(hpage)) {
			/*
			 * For hugetlb pages in shared mappings, try_to_unmap
			 * could potentially call huge_pmd_unshare. Because of
			 * this, take semaphore in write mode here and set
			 * TTU_RMAP_LOCKED to indicate we have taken the lock
			 * at this higher level.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (mapping) {
				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
				i_mmap_unlock_write(mapping);
			} else
				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
		} else {
			try_to_unmap(hpage, ttu);
		}
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage, 0);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more force-full uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}
static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}
static int try_to_split_thp_page(struct page *page, const char *msg)
{
	lock_page(page);
	if (!PageAnon(page) || unlikely(split_huge_page(page))) {
		unsigned long pfn = page_to_pfn(page);

		unlock_page(page);
		if (!PageAnon(page))
			pr_info("%s: %#lx: non anonymous thp\n", msg, pfn);
		else
			pr_info("%s: %#lx: thp split failed\n", msg, pfn);
		put_page(page);
		return -EBUSY;
	}
	unlock_page(page);

	return 0;
}
static int memory_failure_hugetlb(unsigned long pfn, int flags)
{
	struct page *p = pfn_to_page(pfn);
	struct page *head = compound_head(p);
	int res;
	unsigned long page_flags;

	if (TestSetPageHWPoison(head)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
		       pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, page_to_pfn(head), flags);
		return res;
	}

	num_poisoned_pages_inc();

	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(head);
			if (PageHWPoison(head)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != head && TestSetPageHWPoison(head))) {
					num_poisoned_pages_dec();
					unlock_page(head);
					return 0;
				}
			}
			unlock_page(head);
			res = MF_FAILED;
			if (__page_handle_poison(p)) {
				page_ref_inc(p);
				res = MF_RECOVERED;
			}
			action_result(pfn, MF_MSG_FREE_HUGE, res);
			return res == MF_RECOVERED ? 0 : -EBUSY;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			return -EBUSY;
		}
	}

	lock_page(head);
	page_flags = head->flags;

	if (!PageHWPoison(head)) {
		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
		num_poisoned_pages_dec();
		unlock_page(head);
		put_page(head);
		return 0;
	}

	/*
	 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
	 * simply disable it. In order to make it work properly, we need
	 * to make sure that:
	 *  - conversion of a pud that maps an error hugetlb into hwpoison
	 *    entry properly works, and
	 *  - other mm code walking over page table is aware of pud-aligned
	 *    hwpoison entries.
	 */
	if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	return identify_page_state(pfn, p, page_flags);
out:
	unlock_page(head);
	return res;
}
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	const bool unmap_success = true;
	unsigned long size = 0;
	struct to_kill *tk;
	LIST_HEAD(tokill);
	int rc = -EBUSY;
	loff_t start;
	dax_entry_t cookie;

	if (flags & MF_COUNT_INCREASED)
		/*
		 * Drop the extra refcount in case we come from madvise().
		 */
		put_page(page);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn)) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		goto out;

	if (hwpoison_filter(page)) {
		rc = 0;
		goto unlock;
	}

	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/*
		 * TODO: Handle HMM pages which may need coordination
		 * with device-side memory.
		 */
		goto unlock;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);

	list_for_each_entry(tk, &tokill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);
	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up
		 * device-dax mappings which are constant size. The
		 * actual size of the mapping being torn down is
		 * communicated in siginfo, see kill_proc()
		 */
		start = (page->index << PAGE_SHIFT) & ~(size - 1);
		unmap_mapping_range(page->mapping, start, size, 0);
	}
	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
	rc = 0;
unlock:
	dax_unlock_page(page, cookie);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}
/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	static DEFINE_MUTEX(mf_mutex);

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	p = pfn_to_online_page(pfn);
	if (!p) {
		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap)
				return memory_failure_dev_pagemap(pfn, flags,
								  pgmap);
		}
		pr_err("Memory failure: %#lx: memory outside kernel control\n",
			pfn);
		return -ENXIO;
	}

	mutex_lock(&mf_mutex);

try_again:
	if (PageHuge(p)) {
		res = memory_failure_hugetlb(pfn, flags);
		goto unlock_mutex;
	}

	if (TestSetPageHWPoison(p)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
			pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		goto unlock_mutex;
	}

	orig_head = hpage = compound_head(p);
	num_poisoned_pages_inc();

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						num_poisoned_pages_dec();
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				action_result(pfn, MF_MSG_BUDDY, res);
				res = res == MF_RECOVERED ? 0 : -EBUSY;
			} else {
				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
				res = -EBUSY;
			}
			goto unlock_mutex;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p, 0);

	lock_page(p);

	/*
	 * The page could have changed compound pages during the locking.
	 * If this happens just bail out.
	 */
	if (PageCompound(p) && compound_head(p) != orig_head) {
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
	 * correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	/*
	 * unpoison always clears PG_hwpoison inside page lock
	 */
	if (!PageHWPoison(p)) {
		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
		num_poisoned_pages_dec();
		unlock_page(p);
		put_page(p);
		goto unlock_mutex;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unlock_page(p);
		put_page(p);
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * page_lock. We need to wait for writeback completion for this page
	 * or it may trigger a vfs BUG while evicting the inode.
	 */
	if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovering of error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
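/*
 * Typical use (illustrative sketch only): an architecture's hardware error
 * handler that has identified a corrupted pfn but cannot sleep can defer the
 * actual recovery work, e.g.:
 *
 *	memory_failure_queue(pfn, 0);
 *
 * The queued entry is later processed from process context by
 * memory_failure_work_func() below.
 */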
static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);
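/*
 * Design note: errors may be reported from contexts that cannot sleep, so
 * memory_failure_queue() only stashes the pfn in a per-CPU kfifo under a
 * spinlock, and the heavyweight recovery (memory_failure() or
 * soft_offline_page()) runs later from the per-CPU work item in process
 * context.
 */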
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for linux injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned long flags = 0;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter thp only when the thp is being
	 * worked by memory_failure() and the page lock is not held yet.
	 * In such case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (!get_hwpoison_page(p, flags)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_dec();
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
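/*
 * unpoison_memory() is meant for testing: it is normally driven through the
 * hwpoison-inject debugfs file (unpoison-pfn) to undo software-injected
 * poisoning, and has no effect on real hardware errors.
 */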
static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;
	bool lru = PageLRU(page);

	if (PageHuge(page)) {
		isolated = isolate_huge_page(page, pagelist);
	} else {
		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated)
			list_add(&page->lru, pagelist);
	}

	if (isolated && lru)
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_lru(page));

	/*
	 * If we succeed to isolate the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_pages().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_pages() as well.
	 */
	put_page(page);
	return isolated;
}
/*
 * __soft_offline_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int __soft_offline_page(struct page *page)
{
	int ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	/*
	 * Check PageHWPoison again inside page lock because PageHWPoison
	 * is set by memory_failure() outside page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside page lock,
	 * so there's no race between soft_offline_page() and memory_failure().
	 */
	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page))
		/*
		 * Try to invalidate first. This should work for
		 * non dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
				pfn, msg_page[huge], ret, page->flags, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
			pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
		ret = -EBUSY;
	}
	return ret;
}
static int soft_offline_in_use_page(struct page *page)
{
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage))
		if (try_to_split_thp_page(page, "soft offline") < 0)
			return -EBUSY;
	return __soft_offline_page(page);
}

static int soft_offline_free_page(struct page *page)
{
	int rc = 0;

	if (!page_handle_poison(page, true, false))
		rc = -EBUSY;

	return rc;
}

static void put_ref_page(struct page *page)
{
	if (page)
		put_page(page);
}
/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page, *ref_page = NULL;

	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));

	if (!pfn_valid(pfn))
		return -ENXIO;
	if (flags & MF_COUNT_INCREASED)
		ref_page = pfn_to_page(pfn);

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(ref_page);
		return -EIO;
	}

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(ref_page);
		return 0;
	}

retry:
	ret = get_hwpoison_page(page, flags);

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (soft_offline_free_page(page) && try_again) {
			try_again = false;
			goto retry;
		}
	} else if (ret == -EIO) {
		pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
			__func__, pfn, page->flags, &page->flags);