]>
Commit | Line | Data |
---|---|---|
ea2ba7dc | 1 | /* |
4c9e1385 | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
1da177e4 LT |
3 | * Licensed under the GPL |
4 | */ | |
5 | ||
4c9e1385 JD |
6 | #include <linux/mm.h> |
7 | #include <linux/sched.h> | |
8 | #include <linux/hardirq.h> | |
9 | #include <asm/current.h> | |
10 | #include <asm/pgtable.h> | |
11 | #include <asm/tlbflush.h> | |
eb830759 | 12 | #include "arch.h" |
4c9e1385 JD |
13 | #include "as-layout.h" |
14 | #include "kern_util.h" | |
ea2ba7dc | 15 | #include "os.h" |
4c9e1385 | 16 | #include "sysdep/sigcontext.h" |
1da177e4 | 17 | |
/*
 * Resolve a page fault against current->mm.
 *
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 *
 * @address:  faulting virtual address
 * @ip:       instruction pointer at the time of the fault (unused here,
 *            kept for the segv() call signature)
 * @is_write: non-zero if the access was a write
 * @is_user:  non-zero if the fault came from user mode
 * @code_out: out-param receiving the si_code to deliver on failure
 *            (SEGV_MAPERR for unmapped, SEGV_ACCERR for protection)
 */
int handle_page_fault(unsigned long address, unsigned long ip,
		      int is_write, int is_user, int *code_out)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int err = -EFAULT;

	*code_out = SEGV_MAPERR;

	/*
	 * If the fault was during atomic operation, don't take the fault, just
	 * fail.  (Cannot sleep on mmap_sem from atomic context.)
	 */
	if (in_atomic())
		goto out_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto out;
	else if (vma->vm_start <= address)
		goto good_area;
	else if (!(vma->vm_flags & VM_GROWSDOWN))
		goto out;
	else if (is_user && !ARCH_IS_STACKGROW(address))
		/* User access below the stack vma that isn't a plausible
		 * stack-growth probe: refuse to expand. */
		goto out;
	else if (expand_stack(vma, address))
		goto out;

good_area:
	/* The mapping exists; any failure from here is a protection error. */
	*code_out = SEGV_ACCERR;
	if (is_write && !(vma->vm_flags & VM_WRITE))
		goto out;

	/* Don't require VM_READ|VM_EXEC for write faults! */
	if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto out;

	/*
	 * Loop until the PTE is actually present: UML needs the host-side
	 * mapping (flushed below) to reflect a resolved fault.
	 */
	do {
		int fault;
survive:
		fault = handle_mm_fault(mm, vma, address, is_write);
		if (unlikely(fault & VM_FAULT_ERROR)) {
			if (fault & VM_FAULT_OOM) {
				err = -ENOMEM;
				goto out_of_memory;
			} else if (fault & VM_FAULT_SIGBUS) {
				err = -EACCES;
				goto out;
			}
			/* Unknown VM_FAULT_ERROR bit: cannot continue. */
			BUG();
		}
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;

		/* Walk the page tables to re-check the PTE just faulted in. */
		pgd = pgd_offset(mm, address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
	} while (!pte_present(*pte));
	err = 0;
	/*
	 * The below warning was added in place of
	 *	pte_mkyoung(); if (is_write) pte_mkdirty();
	 * If it's triggered, we'd see normally a hang here (a clean pte is
	 * marked read-only to emulate the dirty bit).
	 * However, the generic code can mark a PTE writable but clean on a
	 * concurrent read fault, triggering this harmlessly. So comment it out.
	 */
#if 0
	WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
	flush_tlb_page(vma, address);
out:
	up_read(&mm->mmap_sem);
out_nosemaphore:
	return err;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	if (is_global_init(current)) {
		/* Never OOM-kill init: drop the lock, yield, and retry. */
		up_read(&mm->mmap_sem);
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	goto out;
}
119 | ||
27aa6ef3 JD |
120 | static void bad_segv(struct faultinfo fi, unsigned long ip) |
121 | { | |
122 | struct siginfo si; | |
123 | ||
124 | si.si_signo = SIGSEGV; | |
125 | si.si_code = SEGV_ACCERR; | |
126 | si.si_addr = (void __user *) FAULT_ADDRESS(fi); | |
127 | current->thread.arch.faultinfo = fi; | |
128 | force_sig_info(SIGSEGV, &si, current); | |
129 | } | |
130 | ||
77bf4400 | 131 | static void segv_handler(int sig, struct uml_pt_regs *regs) |
c66fdd5e GS |
132 | { |
133 | struct faultinfo * fi = UPT_FAULTINFO(regs); | |
134 | ||
4c9e1385 | 135 | if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) { |
c66fdd5e GS |
136 | bad_segv(*fi, UPT_IP(regs)); |
137 | return; | |
138 | } | |
139 | segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs); | |
140 | } | |
141 | ||
/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 *
 * Central SEGV triage: resolves the fault via handle_page_fault() where
 * possible, otherwise delivers SIGBUS/SIGSEGV to the task, longjmps to a
 * registered fault catcher, or panics on an unrecoverable kernel fault.
 * Always returns 0 (the return value exists for the caller's signature).
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
		   struct uml_pt_regs *regs)
{
	struct siginfo si;
	jmp_buf *catcher;
	int err;
	int is_write = FAULT_WRITE(fi);
	unsigned long address = FAULT_ADDRESS(fi);

	/*
	 * Kernel fault inside the vmalloc-style window [start_vm, end_vm):
	 * just resync the host TLB mappings, nothing else to do.
	 */
	if (!is_user && (address >= start_vm) && (address < end_vm)) {
		flush_tlb_kernel_vm();
		return 0;
	}
	else if (current->mm == NULL) {
		/* Fault with no address space to resolve it against. */
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Segfault with no mm");
	}

	if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
		/* si.si_code is filled in by handle_page_fault(). */
		err = handle_page_fault(address, ip, is_write, is_user,
					&si.si_code);
	else {
		err = -EFAULT;
		/*
		 * A thread accessed NULL, we get a fault, but CR2 is invalid.
		 * This code is used in __do_copy_from_user() of TT mode.
		 * XXX tt mode is gone, so maybe this isn't needed any more
		 */
		address = 0;
	}

	catcher = current->thread.fault_catcher;
	if (!err)
		return 0;
	else if (catcher != NULL) {
		/* A kernel-side fault catcher is armed: jump back to it. */
		current->thread.fault_addr = (void *) address;
		UML_LONGJMP(catcher, 1);
	}
	else if (current->thread.fault_addr != NULL)
		panic("fault_addr set but no fault catcher");
	else if (!is_user && arch_fixup(ip, regs))
		/* Exception-table fixup handled the kernel fault. */
		return 0;

	if (!is_user) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
		      address, ip);
	}

	if (err == -EACCES) {
		si.si_signo = SIGBUS;
		si.si_errno = 0;
		si.si_code = BUS_ADRERR;
		si.si_addr = (void __user *)address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGBUS, &si, current);
	} else if (err == -ENOMEM) {
		printk(KERN_INFO "VM: killing process %s\n", current->comm);
		do_exit(SIGKILL);
	} else {
		/* Only 0, -EFAULT, -EACCES, -ENOMEM can reach here. */
		BUG_ON(err != -EFAULT);
		si.si_signo = SIGSEGV;
		si.si_addr = (void __user *) address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGSEGV, &si, current);
	}
	return 0;
}
216 | ||
77bf4400 | 217 | void relay_signal(int sig, struct uml_pt_regs *regs) |
1da177e4 | 218 | { |
4c9e1385 | 219 | if (arch_handle_signal(sig, regs)) |
6edf428e JD |
220 | return; |
221 | ||
4c9e1385 JD |
222 | if (!UPT_IS_USER(regs)) { |
223 | if (sig == SIGBUS) | |
224 | printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " | |
225 | "mount likely just ran out of space\n"); | |
1da177e4 | 226 | panic("Kernel mode signal %d", sig); |
6edf428e JD |
227 | } |
228 | ||
5d86456d | 229 | current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); |
1da177e4 LT |
230 | force_sig(sig, current); |
231 | } | |
232 | ||
77bf4400 | 233 | static void bus_handler(int sig, struct uml_pt_regs *regs) |
1da177e4 | 234 | { |
4c9e1385 | 235 | if (current->thread.fault_catcher != NULL) |
fab95c55 | 236 | UML_LONGJMP(current->thread.fault_catcher, 1); |
1da177e4 LT |
237 | else relay_signal(sig, regs); |
238 | } | |
239 | ||
/* SIGWINCH from the host terminal: dispatch it as the console-resize IRQ. */
static void winch(int sig, struct uml_pt_regs *regs)
{
	do_IRQ(WINCH_IRQ, regs);
}
244 | ||
53dd2b55 JD |
245 | const struct kern_handlers handlinfo_kern = { |
246 | .relay_signal = relay_signal, | |
247 | .winch = winch, | |
248 | .bus_handler = bus_handler, | |
249 | .page_fault = segv_handler, | |
250 | .sigio_handler = sigio_handler, | |
251 | .timer_handler = timer_handler | |
252 | }; | |
253 | ||
/* Nothing to set up: UML has no hardware trap table to initialize. */
void trap_init(void)
{
}