/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

static inline bool notify_page_fault(struct pt_regs *regs)
{
	bool ret = false;

#ifdef CONFIG_KPROBES
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = true;
		preempt_enable();
	}
#endif /* CONFIG_KPROBES */

	if (unlikely(debugger_fault_handler(regs)))
		ret = true;

	return ret;
}

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->nip))
		return false;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return true;
	case 62:	/* std or stdu */
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return true;
		}
	}
	return false;
}
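
/*
 * Editor's note, a worked example (not part of the original source):
 * the common prologue instruction "stwu r1,-16(r1)" assembles to
 * 0x9421fff0.  Running it through the tests above:
 *
 *	(0x9421fff0 >> 26) == 37		major opcode: stwu
 *	((0x9421fff0 >> 16) & 0x1f) == 1	rA field: r1
 *
 * so store_updates_sp() returns true for it, and a fault well below r1
 * caused by such a store is treated as a valid stack extension.
 */
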
/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     unsigned int fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return 0;
}

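/*
 * Editor's note: si_addr_lsb tells the recipient of a BUS_MCEERR_AR
 * signal how large the poisoned region around si_addr is; it is the
 * log2 of the affected granule, i.e. PAGE_SHIFT for an ordinary page
 * and the hugepage shift for a hugetlb mapping.
 */
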
static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us
		 * that made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

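/*
 * Editor's note: the helpers above all follow the convention documented
 * before __do_page_fault(): return 0 once the fault has been fully dealt
 * with (including any signal queued for a user task), or a signal number
 * for a kernel fault that the low-level assembly caller must hand to
 * bad_page_fault().
 */
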
/* Is this a bad kernel fault? */
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
			     unsigned long address)
{
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
		printk_ratelimited(KERN_CRIT "kernel tried to execute"
				   " exec-protected page (%lx) -"
				   " exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns,
						      current_uid()));
	}
	return is_exec || (address >= TASK_SIZE);
}

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma,
				bool store_update_sp)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			return true;
	}
	return false;
}

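/*
 * Editor's summary of the rules above: within 1MB of the top of the
 * stack VMA, expansion is always permitted; further down, it is only
 * permitted when the access lands within 2kB of the current user stack
 * pointer or the faulting instruction is one of the r1-updating stores
 * recognised by store_updates_sp().
 */
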
static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors.  This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;

	return false;
}

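/*
 * Editor's summary: an exec fault is bad unless the VMA is VM_EXEC, or
 * the CPU lacks per-page execute protection (no CPU_FTR_NOEXECUTE) and
 * the VMA is at least readable or writable; a write fault is bad unless
 * VM_WRITE is set; a read fault is bad unless one of VM_READ, VM_WRITE
 * or VM_EXEC is set.
 */
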
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_STD_MMU
static void sanity_check_fault(bool is_write, unsigned long error_code)
{
	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT.  Any update to the pte to reduce access will result in
	 * us removing the hash page table entry, thus resulting in a
	 * DSISR_NOHPTE fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page
	 * table entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush.  This is why we
	 * have the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up such that the D/I cache
	 * sync happens via a fault.  But that is handled by the low-level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the above
	 * vma->vm_flags check should handle those and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded parts with per-page exec support that don't have a
	 * coherent icache, we do get PROTFAULT and we handle that D/I cache
	 * sync in set_pte_at while taking the noexec/prot fault.  Hence
	 * this WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have pages marked no-access for user.
	 */
	if (!radix_enabled() && !is_write)
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, unsigned long error_code) { }
#endif /* CONFIG_PPC_STD_MMU */

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.  For 400-family
 * processors the error_code parameter is ESR for a data fault, 0 for an
 * instruction fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	int fault, major = 0;
	bool store_update_sp = false;

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, error_code);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_write && is_user)
		store_update_sp = store_updates_sp(regs);

	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
		return bad_area(regs, address);

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception,
		 * otherwise return to bad_page_fault.
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);
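
/*
 * Editor's note: exception_enter()/exception_exit() are context-tracking
 * hooks (CONFIG_CONTEXT_TRACKING).  They let the kernel, e.g. RCU on a
 * nohz_full CPU, know that user mode was interrupted and the kernel is
 * now running, and they restore the previous context state on the way
 * out.
 */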

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(regs->nip);
	if (entry) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (regs->trap) {
	case 0x300:
	case 0x380:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
		       "data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:
	case 0x480:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
		       "instruction fetch\n");
		break;
	case 0x600:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
		       "unaligned access at address 0x%08lx\n", regs->dar);
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
		       "unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
	       regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}