/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/interrupt.h>
#include <linux/mmiotrace.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/errno.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm-generic/sections.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/proto.h>
#include <asm/traps.h>
#include <asm/desc.h>
/*
 * Page fault error code bits:
 *
 *   bit 0 ==  0: no page found        1: protection fault
 *   bit 1 ==  0: read access          1: write access
 *   bit 2 ==  0: kernel-mode access   1: user-mode access
 *   bit 3 ==                          1: use of reserved bit detected
 *   bit 4 ==                          1: fault was an instruction fetch
 */
enum x86_pf_error_code {

        PF_PROT         =               1 << 0,
        PF_WRITE        =               1 << 1,
        PF_USER         =               1 << 2,
        PF_RSVD         =               1 << 3,
        PF_INSTR        =               1 << 4,
};
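
/*
 * Illustrative decoding (derived from the bit layout above, not part of
 * the original source): a user-mode write to an unmapped page arrives
 * with error_code == (PF_USER | PF_WRITE) == 0x6, while a user-mode
 * write to a present but read-only page additionally carries PF_PROT,
 * i.e. 0x7.
 */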

/*
 * (returns 0 if mmiotrace is disabled)
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
        return 0;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode_vm(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }

        return ret;
#else
        return 0;
#endif
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                      unsigned char opcode, int *prefetch)
{
        unsigned char instr_hi = opcode & 0xf0;
        unsigned char instr_lo = opcode & 0x0f;

        switch (instr_hi) {
        case 0x20:
        case 0x30:
                /*
                 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
                 * In X86_64 long mode, the CPU will signal invalid
                 * opcode if some of these prefixes are present so
                 * X86_64 will never get here anyway
                 */
                return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
        case 0x40:
                /*
                 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
                 * Need to figure out under what instruction mode the
                 * instruction was issued. Could check the LDT for lm,
                 * but for now it's good enough to assume that long
                 * mode only uses well known segments or kernel.
                 */
                return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
        case 0x60:
                /* 0x64 thru 0x67 are valid prefixes in all modes. */
                return (instr_lo & 0xC) == 0x4;
        case 0xF0:
                /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
                return !instr_lo || (instr_lo>>1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
                if (probe_kernel_address(instr, opcode))
                        return 0;

                *prefetch = (instr_lo == 0xF) &&
                        (opcode == 0x0D || opcode == 0x18);
                return 0;
        default:
                return 0;
        }
}
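
/*
 * Worked example (illustrative, not from the original source): for a
 * faulting "prefetcht0 (%rax)" the instruction bytes are 0x0F 0x18 ...;
 * the scan loop below reads 0x0F, lands in the 0x00 case above, peeks
 * at the next byte (0x18) and sets *prefetch, so the bogus fault is
 * silently ignored.
 */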

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
        unsigned char *max_instr;
        unsigned char *instr;
        int prefetch = 0;

        /*
         * If it was an exec (instruction fetch) fault on an NX page, then
         * do not ignore the fault:
         */
        if (error_code & PF_INSTR)
                return 0;

        instr = (void *)convert_ip_to_linear(current, regs);
        max_instr = instr + 15;

        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
                return 0;

        while (instr < max_instr) {
                unsigned char opcode;

                if (probe_kernel_address(instr, opcode))
                        break;

                instr++;

                if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
                        break;
        }
        return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk)
{
        siginfo_t info;

        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;

        force_sig_info(si_signo, &info, tsk);
}

#ifdef CONFIG_X86_64
static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}
#endif

static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
        __typeof__(pte_val(__pte(0))) page;

        page = read_cr3();
        page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];

#ifdef CONFIG_X86_PAE
        printk("*pdpt = %016Lx ", page);
        if ((page >> PAGE_SHIFT) < max_low_pfn
            && page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
                                                        & (PTRS_PER_PMD - 1)];
                printk(KERN_CONT "*pde = %016Lx ", page);
                page &= ~_PAGE_NX;
        }
#else
        printk("*pde = %08lx ", page);
#endif

        /*
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
        if ((page >> PAGE_SHIFT) < max_low_pfn
            && (page & _PAGE_PRESENT)
            && !(page & _PAGE_PSE)) {

                page &= PAGE_MASK;
                page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
                                                        & (PTRS_PER_PTE - 1)];
                printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
        }

        printk("\n");
#else /* CONFIG_X86_64 */
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = (pgd_t *)read_cr3();

        pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);

        pgd += pgd_index(address);
        if (bad_address(pgd))
                goto bad;

        printk("PGD %lx ", pgd_val(*pgd));

        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (bad_address(pud))
                goto bad;

        printk("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud) || pud_large(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (bad_address(pmd))
                goto bad;

        printk("PMD %lx ", pmd_val(*pmd));
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        if (bad_address(pte))
                goto bad;

        printk("PTE %lx", pte_val(*pte));
out:
        printk("\n");
        return;
bad:
        printk("BAD\n");
#endif
}

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        /*
         * set_pgd(pgd, *pgd_k); here would be useless on PAE
         * and redundant with the set_pmd() on non-PAE. As would
         * set_pud.
         */
        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd)) {
                set_pmd(pmd, *pmd_k);
                arch_flush_lazy_mmu_mode();
        } else {
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
        }

        return pmd_k;
}
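
/*
 * In short (summary added for clarity): vmalloc_sync_one() copies the
 * kernel PMD entry covering 'address' from the reference page table
 * (init_mm.pgd) into the given per-process pgd, so a 32-bit task that
 * faults on a fresh vmalloc mapping can pick it up lazily.
 */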

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
        unsigned long bit;

        if (!v8086_mode(regs))
                return;

        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
                tsk->thread.screen_bitmap |= 1 << bit;
}

#else /* CONFIG_X86_64: */

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
}

#endif

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
        static int once;

        if (address != regs->ip)
                return 0;

        if ((address >> 32) != 0)
                return 0;

        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
                if (!once) {
                        printk(errata93_warning);
                        once = 1;
                }
                regs->ip = address;
                return 1;
        }
#endif
        return 0;
}

/*
 * Work around K8 erratum #100: the K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
        if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
                return 1;
#endif
        return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
        unsigned long nr;

        /*
         * Pentium F0 0F C7 C8 bug workaround:
         */
        if (boot_cpu_data.f00f_bug) {
                nr = (address - idt_descr.address) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return 1;
                }
        }
#endif
        return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
#ifdef CONFIG_X86_32
        if (!oops_may_print())
                return;
#endif

#ifdef CONFIG_X86_PAE
        if (error_code & PF_INSTR) {
                unsigned int level;

                pte_t *pte = lookup_address(address, &level);

                if (pte && pte_present(*pte) && !pte_exec(*pte)) {
                        printk(KERN_CRIT "kernel tried to execute "
                                "NX-protected page - exploit attempt? "
                                "(uid: %d)\n", current_uid());
                }
        }
#endif

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %p\n", (void *) address);
        printk(KERN_ALERT "IP:");
        printk_address(regs->ip, 1);

        dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
            unsigned long address)
{
        struct task_struct *tsk;
        unsigned long flags;
        int sig;

        flags = oops_begin();
        tsk = current;
        sig = SIGKILL;

        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
               tsk->comm, address);
        dump_pagetable(address);

        tsk->thread.cr2         = address;
        tsk->thread.trap_no     = 14;
        tsk->thread.error_code  = error_code;

        if (__die("Bad pagetable", regs, error_code))
                sig = 0;

        oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address)
{
        struct task_struct *tsk = current;
        unsigned long *stackend;

#ifdef CONFIG_X86_64
        unsigned long flags;
        int sig;
#endif

        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * 32-bit:
         *
         *   Valid to do another page fault here, because if this fault
         *   had been triggered by is_prefetch fixup_exception would have
         *   handled it.
         *
         * 64-bit:
         *
         *   Hall of shame of CPU/BIOS bugs.
         */
        if (is_prefetch(regs, error_code, address))
                return;

        if (is_errata93(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice:
         */
#ifdef CONFIG_X86_32
        bust_spinlocks(1);
#else
        flags = oops_begin();
#endif

        show_fault_oops(regs, error_code, address);

        stackend = end_of_stack(tsk);
        if (*stackend != STACK_END_MAGIC)
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        tsk->thread.cr2         = address;
        tsk->thread.trap_no     = 14;
        tsk->thread.error_code  = error_code;

#ifdef CONFIG_X86_32
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
#else
        sig = SIGKILL;
        if (__die("Oops", regs, error_code))
                sig = 0;

        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_EMERG "CR2: %016lx\n", address);

        oops_end(flags, regs, sig);
#endif
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);

        print_vma_addr(KERN_CONT " in ", regs->ip);

        printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (error_code & PF_USER) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                /*
                 * Valid to do another page fault here because this one came
                 * from user space:
                 */
                if (is_prefetch(regs, error_code, address))
                        return;

                if (is_errata100(regs, address))
                        return;

                if (unlikely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);

                /* Kernel addresses are always protection faults: */
                tsk->thread.cr2         = address;
                tsk->thread.error_code  = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no     = 14;

                force_sig_info_fault(SIGSEGV, si_code, address, tsk);

                return;
        }

        if (is_f00f_bug(regs, address))
                return;

        no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
              unsigned long address)
{
        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed):
         */
        up_read(&current->mm->mmap_sem);

        pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die: */
        if (!(error_code & PF_USER))
                no_context(regs, error_code, address);

#ifdef CONFIG_X86_32
        /* User space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
                return;
#endif

        tsk->thread.cr2         = address;
        tsk->thread.error_code  = error_code;
        tsk->thread.trap_no     = 14;

        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
{
        if (fault & VM_FAULT_OOM) {
                out_of_memory(regs, error_code, address);
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
                else
                        BUG();
        }
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
        if ((error_code & PF_WRITE) && !pte_write(*pte))
                return 0;

        if ((error_code & PF_INSTR) && !pte_exec(*pte))
                return 0;

        return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        /* Reserved-bit violation or user access to kernel space? */
        if (error_code & (PF_USER | PF_RSVD))
                return 0;

        pgd = init_mm.pgd + pgd_index(address);
        if (!pgd_present(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return 0;

        if (pud_large(*pud))
                return spurious_fault_check(error_code, (pte_t *) pud);

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);

        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;

        ret = spurious_fault_check(error_code, pte);
        if (!ret)
                return 0;

        /*
         * Make sure we have permissions in PMD.
         * If not, then there's a bug in the page tables:
         */
        ret = spurious_fault_check(error_code, (pte_t *) pmd);
        WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

        return ret;
}
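
/*
 * Typical scenario (illustrative, added for clarity): set_memory_rw()
 * upgrades a kernel page from RO to RW without flushing other CPUs'
 * TLBs.  A CPU still holding the stale RO entry then writes to the
 * page, traps with PF_PROT|PF_WRITE, spurious_fault() sees that the
 * page tables already permit the access and returns 1, and the
 * faulting access is simply retried.
 */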

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 *
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
        unsigned long pgd_paddr;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_paddr = read_cr3();
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
#else
        pgd_t *pgd, *pgd_ref;
        pud_t *pud, *pud_ref;
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the latter
         * case just flush:
         */
        pgd = pgd_offset(current->active_mm, address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;

        if (pgd_none(*pgd))
                set_pgd(pgd, *pgd_ref);
        else
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

        /*
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */

        pud = pud_offset(pgd, address);
        pud_ref = pud_offset(pgd_ref, address);
        if (pud_none(*pud_ref))
                return -1;

        if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
                BUG();

        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;

        if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
                BUG();

        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;

        pte = pte_offset_kernel(pmd, address);

        /*
         * Don't use pte_page here, because the mappings can point
         * outside mem_map, and the NUMA hash lookup cannot handle
         * that:
         */
        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                BUG();

        return 0;
#endif
}
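
/*
 * Example flow (illustrative, added for clarity): a driver vmalloc()s a
 * buffer, which installs new entries only in init_mm's page tables.
 * When another task (or an interrupt running on top of it) first
 * touches that buffer, it faults here and vmalloc_fault() copies the
 * missing top-level entry from the reference page table into the
 * current page-table tree, after which the access is retried.
 */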

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
        if (write) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* read, present: */
        if (unlikely(error_code & PF_PROT))
                return 1;

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}
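
/*
 * For instance (example added for clarity): a write to a VMA mapped
 * without PROT_WRITE, or a read of a PROT_NONE mapping (its VMA has
 * none of VM_READ/VM_WRITE/VM_EXEC), is flagged here and surfaces as
 * SEGV_ACCERR via bad_area_access_error().
 */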

static int fault_in_kernel_space(unsigned long address)
{
#ifdef CONFIG_X86_32
        return address >= TASK_SIZE;
#else
        return address >= TASK_SIZE64;
#endif
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        unsigned long address;
        struct mm_struct *mm;
        int write;
        int fault;

        tsk = current;
        mm = tsk->mm;

        prefetchw(&mm->mmap_sem);

        /* Get the faulting address: */
        address = read_cr2();

        if (unlikely(kmmio_fault(regs, address)))
                return;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
                    vmalloc_fault(address) >= 0)
                        return;

                /* Can handle a stale RO->RW TLB: */
                if (spurious_fault(error_code, address))
                        return;

                /* kprobes don't want to hook the spurious faults: */
                if (notify_page_fault(regs))
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock:
                 */
                bad_area_nosemaphore(regs, error_code, address);

                return;
        }

        /* kprobes don't want to hook the spurious faults: */
        if (unlikely(notify_page_fault(regs)))
                return;
        /*
         * It's safe to allow irq's after cr2 has been saved and the
         * vmalloc fault has been handled.
         *
         * User-mode registers count as a user access even for any
         * potential system fault or CPU buglet:
         */
        if (user_mode_vm(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
        } else {
                if (regs->flags & X86_EFLAGS_IF)
                        local_irq_enable();
        }

        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);

        /*
         * If we're in an interrupt, have no user context or are running
         * in an atomic region then we must not take the fault:
         */
        if (unlikely(in_atomic() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
         * the kernel and should generate an OOPS.  Unfortunately, in the
         * case of an erroneous fault occurring in a code path which already
         * holds mmap_sem we will deadlock attempting to validate the fault
         * against the address space.  Luckily the kernel only validly
         * references user space from well defined areas of code, which are
         * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock. Attempt to lock the address space, if we cannot we then
         * validate the source. If this is invalid we can skip the address
         * space check, thus avoiding the deadlock:
         */
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if ((error_code & PF_USER) == 0 &&
                    !search_exception_tables(regs->ip)) {
                        bad_area_nosemaphore(regs, error_code, address);
                        return;
                }
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, error_code, address);
                return;
        }
        if (error_code & PF_USER) {
                /*
                 * Accessing the stack below %sp is always a bug.
                 * The large cushion allows instructions like enter
                 * and pusha to work. ("enter $65535, $31" pushes
                 * 32 pointers and then decrements %sp by 65535.)
                 */
                if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
                        bad_area(regs, error_code, address);
                        return;
                }
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        write = error_code & PF_WRITE;

        if (unlikely(access_error(error_code, write, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault:
         */
        fault = handle_mm_fault(mm, vma, address, write);

        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, error_code, address, fault);
                return;
        }

        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;

        check_v8086_mode(regs, address, tsk);

        up_read(&mm->mmap_sem);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

void vmalloc_sync_all(void)
{
        unsigned long address;

#ifdef CONFIG_X86_32
        if (SHARED_KERNEL_PMD)
                return;

        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE && address < FIXADDR_TOP;
             address += PMD_SIZE) {

                unsigned long flags;
                struct page *page;

                spin_lock_irqsave(&pgd_lock, flags);
                list_for_each_entry(page, &pgd_list, lru) {
                        if (!vmalloc_sync_one(page_address(page), address))
                                break;
                }
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
#else /* CONFIG_X86_64 */
        for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
             address += PGDIR_SIZE) {

                const pgd_t *pgd_ref = pgd_offset_k(address);
                unsigned long flags;
                struct page *page;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock_irqsave(&pgd_lock, flags);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                }
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
#endif
}