/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

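/*
 * Give kprobes a chance to claim a fault taken in kernel mode while a
 * probe is active. Returns 1 if the kprobes fault handler dealt with
 * the fault, 0 if normal fault processing should continue.
 */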
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

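/*
 * Copy the kernel mappings for 'address' from the reference page table
 * (init_mm.pgd) into the given pgd, filling in any missing pud/pmd
 * entries. Returns the pmd entry in the reference table on success, or
 * NULL if there was nothing to synchronise, in which case the fault has
 * not been handled here.
 */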
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

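/*
 * Kernel page table entries for the vmalloc and module area are created
 * in the reference page table (init_mm.pgd) first; other page tables
 * pick them up lazily, on the first fault in that range. vmalloc_fault()
 * below performs that fix-up via vmalloc_sync_one(), so it must not take
 * any locks and must not rely on 'current'.
 */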
/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

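/*
 * Handle the uncommon outcomes of handle_mm_fault(): a pending fatal
 * signal, VM_FAULT_OOM or VM_FAULT_SIGBUS. Returns 1 if the fault has
 * been fully dealt with here and the caller must return, 0 if normal
 * processing should continue.
 */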
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

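/*
 * Check the access implied by error_code against the vma's permissions:
 * a write fault needs VM_WRITE, an ITLB miss needs VM_EXEC, and any
 * other (read) fault needs the vma to be readable, writable or
 * executable. Returns 1 on a permission mismatch, 0 if the access is
 * allowed.
 */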
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
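/*
 * In outline: faults on kernel-space addresses are first tried against
 * the lazy vmalloc fix-up (no locks taken); anything left over is passed
 * to bad_area_nosemaphore(). User-space faults take mmap_sem, look up
 * the vma, check permissions with access_error(), and hand the real work
 * to handle_mm_fault(), retrying if the core fault handler asks for it.
 */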
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}