/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_tables	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
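
/*
 * Example decodings: error_code == 0x6 (PF_USER | PF_WRITE) is a
 * user-mode write to a not-present page (the common demand-paging
 * and COW case), while error_code == 0x0 is a kernel-mode read of a
 * not-present page, e.g. a NULL pointer dereference in the kernel.
 */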

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace; returns -1 if mmiotrace handled the fault:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26, 0x2E, 0x36 and 0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal an invalid
		 * opcode if some of these prefixes are present, so the
		 * 64-bit kernel never gets here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * We would need to figure out under which mode the
		 * instruction was issued; we could check the LDT for lm,
		 * but for now it's good enough to assume that long mode
		 * only uses well-known segments or the kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo >> 1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18: */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			    (opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
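
/*
 * Example: for "prefetchnta (%rax)", encoded as 0F 18 00, the scan in
 * is_prefetch() below first sees opcode 0x0F and lands in the
 * "case 0x00" branch above with instr_lo == 0xF; the following byte
 * 0x18 then sets *prefetch, and the walk stops.
 */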

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}
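
/*
 * Note: the scan above is bounded by instr + 15 because an x86
 * instruction, prefixes included, is architecturally limited to
 * 15 bytes.
 */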

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}
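
/*
 * Background: when the kernel PMD is not shared between processes,
 * a new vmalloc mapping is installed only in the reference page
 * table (init_mm.pgd). The first access through another pgd faults,
 * and vmalloc_fault()/vmalloc_sync_all() copy the missing pmd entry
 * over on demand, as done above.
 */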

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}
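
/*
 * Example: a vm86-mode fault at 0xA3000 gives
 * bit = (0xA3000 - 0xA0000) >> PAGE_SHIFT = 3, marking the fourth
 * 4k page of the legacy 0xA0000 video range in screen_bitmap.
 */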

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen due to a race in a page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
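
/*
 * Note the asymmetry with the 32-bit path above: only the top-level
 * pgd entry may legitimately be missing here (it is copied from
 * init_mm), while the pud/pmd/pte levels are shared between page
 * tables, so any mismatch at those levels is a hard BUG().
 */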

static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * Many BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note that we only handle faults in the kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
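
/*
 * Illustrative case: with kernel text at, say, 0xffffffff81234567,
 * the erratum makes the CPU fault with RIP == 0x0000000081234567.
 * The code above ORs the upper 32 bits back in, sees the result land
 * inside [_stext, _etext], and resumes at the repaired address.
 */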

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in the LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
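
/*
 * Background: the Pentium F00F bug is a locked CMPXCHG8B with a
 * register operand (F0 0F C7 C8) that would otherwise hang the CPU
 * while delivering #UD. The workaround maps the IDT read-only, so
 * the delivery page-faults here instead; IDT descriptors are 8 bytes
 * (hence the ">> 3"), and entry 6 is #UD, which is then rerouted to
 * do_invalid_op() above.
 */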

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch(), fixup_exception() would
	 *   have handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's a kernel or user fault first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & VM_FAULT_HWPOISON) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in the PMD too.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
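
/*
 * Example of a spurious fault: a kernel page is flipped from RO to
 * RW, but this CPU still holds the stale RO translation in its TLB
 * and faults on a write. The walk above finds the PTE (and PMD/PUD)
 * already writable, so the fault is ignored and the access is simply
 * retried with a fresh TLB entry.
 */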

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
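
/*
 * Summary of the cases above: a write requires VM_WRITE on the vma;
 * a read (or fetch) of a *present* page that still faulted is a
 * protection error; a read of a not-present page requires at least
 * one of VM_READ, VM_EXEC or VM_WRITE.
 */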

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}
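
/*
 * TASK_SIZE_MAX is the top of the user address range: TASK_SIZE
 * (i.e. PAGE_OFFSET, typically 0xC0000000) on 32-bit, and the 47-bit
 * canonical limit minus one guard page on 64-bit. Faults at or above
 * it take the kernel-space path in do_page_fault() below.
 */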

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0) and that the fault was not a
	 * protection or reserved-bit error
	 * ((error_code & (PF_PROT | PF_RSVD)) == 0).
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irqs after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space; if we cannot, we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it:
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}