/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/kdebug.h>

extern void die(const char *,struct pt_regs *,long);

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;

	if (yes) {
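		/* oops_in_progress tells printk and the console
		   drivers to bust locks an oopsing CPU may hold
		   rather than deadlock on them. */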
		oops_in_progress = 1;
		return;
	}
#ifdef CONFIG_VT
	unblank_screen();
#endif
	oops_in_progress = 0;
	/*
	 * OK, the message is on the console.  Now we call printk()
	 * without oops_in_progress set so that printk will give klogd
	 * a poke.  Hold onto your hats...
	 */
	console_loglevel = 15;		/* NMI oopser may have shut the console up */
	printk(" ");
	console_loglevel = loglevel_save;
}

/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long eip = regs->eip;
	unsigned seg = regs->xcs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* The standard kernel/user address space limit. */
	*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;

	/* Unlikely, but must come before segment checks. */
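	/* In vm86 mode CS holds a real-mode segment, so the linear
	   address is simply (segment << 4) + offset. */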
	if (unlikely((regs->eflags & VM_MASK) != 0))
		return eip + (seg << 4);

	/* By far the most common cases. */
	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
		return eip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
	if ((~seg_ar & 0x9800) || eip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned eip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
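	/* Bit 2 of the selector is the TI bit: set means it indexes
	   the LDT rather than the GDT.  "seg & ~7" strips TI and RPL
	   to give the descriptor's byte offset in the table. */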
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		down(&current->mm->context.sem);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((unsigned long *)desc);

	if (seg & (1<<2)) {
		up(&current->mm->context.sem);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return eip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned long instr = get_segment_eip (regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

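	/* x86 instructions are at most 15 bytes long, so at most 15
	   prefix/opcode bytes need to be examined. */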
	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > limit)
			break;
		if (__get_user(opcode, (unsigned char __user *) instr))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
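			/* (0x0F 0x0D is the 3DNow! PREFETCH/PREFETCHW
			   group, 0x0F 0x18 the SSE PREFETCHh group.) */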
			scan_more = 0;
			if (instr > limit)
				break;
			if (__get_user(opcode, (unsigned char __user *) instr))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
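	/* Only AMD CPUs of family 6 and up (Athlon/Opteron) are known
	   to raise these spurious prefetch faults. */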
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

fastcall void do_invalid_op(struct pt_regs *, unsigned long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 */
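/*
 * The hardware also defines bit 3 (a reserved page-table bit was set)
 * and bit 4 (instruction fetch, set only with NX enabled); the handler
 * below tests bit 4 as "error_code & 16".
 */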
fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;
	int write, si_code;

	/* get the address */
	address = read_cr2();

	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
					SIGSEGV) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after cr2 has been saved */
	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 1) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
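		/* 5 == bits 0 and 2: a not-present fault taken in
		   kernel mode, i.e. a candidate vmalloc fault. */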
		if (!(error_code & 5))
			goto vmalloc_fault;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->eip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * accessing the stack below %esp is always a bug.
		 * The "+ 32" is there due to some instructions (like
		 * pusha) doing post-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 32 < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
#ifdef TEST_VERIFY_AREA
			if (regs->cs == KERNEL_CS)
				printk("WP fault at %08lx\n", regs->eip);
#endif
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
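	/* VM_FAULT_MAJOR means the fault needed blocking I/O to bring
	   the page in; VM_FAULT_MINOR was satisfied without I/O. */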
	switch (handle_mm_fault(mm, vma, address, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
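	/* screen_bitmap has one bit per 4K page of the legacy video
	   window 0xA0000-0xBFFFF that the vm86 task has touched. */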
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
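	/*
	 * The workaround maps the IDT read-only, so the buggy
	 * "lock cmpxchg8b %eax" instruction page-faults here instead
	 * of locking the CPU up.  IDT entries are 8 bytes, hence the
	 * ">> 3"; entry 6 is the invalid-opcode gate, so the fault is
	 * rerouted to do_invalid_op().
	 */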
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

#ifdef CONFIG_X86_PAE
	if (error_code & 16) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	printk(KERN_ALERT " printing eip:\n");
	printk("%08lx\n", regs->eip);
	page = read_cr3();
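	/* "address >> 22" is the page-directory index used by
	   two-level (non-PAE) paging: each pde maps 4MB. */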
	page = ((unsigned long *) __va(page))[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	/*
	 * We must not directly access the pte in the highpte
	 * case, the page table might be allocated in highmem.
	 * And lets rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
#ifndef CONFIG_HIGHPTE
	if (page & 1) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
#endif
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
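	/* init (pid 1) must survive OOM: yield the CPU and retry the
	   fault instead of being killed. */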
	if (tsk->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(address);
		unsigned long pgd_paddr;
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

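		/* Walk this task's page table and init_mm's reference
		   table in lockstep; copying the shared pmd entry is
		   enough, since everything below it is shared with
		   init_mm. */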
		pgd_paddr = read_cr3();
		pgd = index + (pgd_t *)__va(pgd_paddr);
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE. As would
		 * set_pud.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}