/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static const char *fault_name(unsigned int esr);

/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}

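/*
 * The dump above lands on the console as two lines, roughly of this shape
 * (the values here are illustrative only, not taken from a real fault):
 *
 *	pgd = ffffffc001234000
 *	[0000007fb1234000] *pgd=00000000bd9f7003, *pud=..., *pmd=..., *pte=...
 *
 * with the walk stopping at the first level that is none/bad.
 */
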
#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval;
	unsigned int tmp;

	if (pte_same(*ptep, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * PTE_RDONLY is cleared by default in the asm below, so set it back
	 * if necessary (read-only or clean PTE).
	 */
	if (!pte_write(entry) || !dirty)
		pte_val(entry) |= PTE_RDONLY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state.
	 */
	asm volatile("//	ptep_set_access_flags\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
	"	orr	%0, %0, %4		// set flags\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}
#endif

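/*
 * The asm sequence above is a load-exclusive/store-exclusive (ldxr/stxr)
 * retry loop. As an illustrative sketch only (using a cmpxchg-style helper
 * rather than what the code actually emits), it is roughly equivalent to:
 *
 *	do {
 *		old = READ_ONCE(pte_val(*ptep));
 *		new = (old & ~PTE_RDONLY) | pte_val(entry);
 *	} while (cmpxchg_relaxed(&pte_val(*ptep), old, new) != old);
 *
 * i.e. PTE_RDONLY is cleared and the (already masked) access flags are ORed
 * in, retrying if another agent updated the PTE in the meantime.
 */
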
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map. User mode
 * accesses just cause a SIGSEGV.
 */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
			    unsigned int esr, unsigned int sig, int code,
			    struct pt_regs *regs)
{
	struct siginfo si;

	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
			addr, esr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}

	tsk->thread.fault_address = addr;
	tsk->thread.fault_code = esr;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, esr, regs);
}

#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

#define ESR_LNX_EXEC		(1 << 24)

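/*
 * VM_FAULT_BADMAP and VM_FAULT_BADACCESS are arch-private fault codes,
 * placed above the generic VM_FAULT_* bits so __do_page_fault() can return
 * them alongside the core mm fault codes. ESR_LNX_EXEC is a Linux software
 * flag, stuffed into an ESR ISS bit that the architecture leaves unused for
 * instruction aborts (the entry code sets it, not the hardware), so that
 * do_page_fault() can tell exec faults apart from data faults.
 */
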
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred. If we encountered a write or exec fault, we must have
	 * appropriate permissions, otherwise we allow any permission.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static inline int permission_fault(unsigned int esr)
{
	unsigned int ec       = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}

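/*
 * A data abort taken from the current exception level (i.e. triggered by
 * the kernel itself) whose fault status is a permission fault is, with PAN
 * enabled, the typical signature of the kernel touching user memory outside
 * the uaccess routines; do_page_fault() below uses this check to decide
 * whether such an access warrants a die().
 */
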
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	if (permission_fault(esr) && (addr < USER_DS)) {
		if (get_fs() == KERNEL_DS)
			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

		if (!search_exception_tables(regs->pc))
			die("Accessing user space memory outside uaccess.h routines", regs, esr);
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant entry, we
 * copy it to this task. If not, we send the process a signal, fixup the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	do_bad_area(addr, esr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1;
}

static struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
} fault_info[] = {
	{ do_bad,		SIGBUS,  0,		"ttbr address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 1 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 2 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
};

static const char *fault_name(unsigned int esr)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	return inf->name;
}

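/*
 * The low six bits of the ESR carry the fault status code (DFSC/IFSC), so
 * "esr & 63" selects one of the 64 fault_info entries above. The same
 * indexing is used by do_mem_abort() below to pick the handler.
 */
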
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
				    esr_get_class_string(esr), (void *)regs->pc,
				    (void *)regs->sp);

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}

int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
};

void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;
	int rv;

	/*
	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
	 * already disabled to preserve the last enabled/disabled addresses.
	 */
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (!inf->fn(addr, esr, regs)) {
		rv = 1;
	} else {
		pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
			 inf->name, esr, addr);

		info.si_signo = inf->sig;
		info.si_errno = 0;
		info.si_code  = inf->code;
		info.si_addr  = (void __user *)addr;
		arm64_notify_die("", regs, &info, 0);
		rv = 0;
	}

	if (interrupts_enabled(regs))
		trace_hardirqs_on();

	return rv;
}

#ifdef CONFIG_ARM64_PAN
void cpu_enable_pan(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif /* CONFIG_ARM64_PAN */

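/*
 * Clearing SCTLR_EL1.SPAN makes the hardware set PSTATE.PAN on every
 * exception taken to EL1, so kernel code can no longer dereference user
 * addresses directly; the uaccess routines toggle PAN around each access.
 */
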
#ifdef CONFIG_ARM64_UAO
/*
 * Kernel threads have fs=KERNEL_DS by default, and don't need to call
 * set_fs(), devtmpfs in particular relies on this behaviour.
 * We need to enable the feature at runtime (instead of adding it to
 * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
 */
void cpu_enable_uao(void *__unused)
{
	asm(SET_PSTATE_UAO(1));
}
#endif /* CONFIG_ARM64_UAO */