/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

int show_unhandled_signals = 1;

1da177e4 LT |
33 | /* |
34 | * This routine handles page faults. It determines the address, | |
35 | * and the problem, and then passes it off to one of the appropriate | |
36 | * routines. | |
37 | */ | |
c3fc5cd5 RB |
38 | static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, |
39 | unsigned long address) | |
1da177e4 LT |
40 | { |
41 | struct vm_area_struct * vma = NULL; | |
42 | struct task_struct *tsk = current; | |
43 | struct mm_struct *mm = tsk->mm; | |
44 | const int field = sizeof(unsigned long) * 2; | |
45 | siginfo_t info; | |
83c54070 | 46 | int fault; |
759496ba | 47 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
1da177e4 | 48 | |
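	/*
	 * Rate-limit the "unhandled SIGSEGV" diagnostics printed below
	 * (at most a burst of 10 messages every 5 seconds).
	 */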
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

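	/*
	 * Kernel-space faults in the vmalloc (and module) range are either
	 * fixed up by synchronizing page tables (32-bit, vmalloc_fault
	 * below) or treated as a kernel bug (64-bit, no_context), since
	 * 64-bit kernels do not keep per-task copies of the kernel's
	 * top-level page table entries.
	 */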
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
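		/*
		 * Non-write fault (load or instruction fetch).  CPUs with
		 * the RIXI feature can fault on instruction fetch from
		 * no-execute (XI) pages and on loads from no-read (RI)
		 * pages, so check VM_EXEC and VM_READ explicitly; other
		 * CPUs only fault when the page has no permissions at all.
		 */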
		if (cpu_has_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ) &&
			    exception_epc(regs) != address) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

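	/*
	 * handle_mm_fault() may drop mmap_sem and return VM_FAULT_RETRY if
	 * a fatal signal arrived while it was waiting (FAULT_FLAG_KILLABLE);
	 * in that case just return and let the signal be delivered.
	 */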
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
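	/*
	 * Fault accounting: bump maj_flt/min_flt and emit the matching perf
	 * software event.  The retry path clears FAULT_FLAG_ALLOW_RETRY, so
	 * this block is not entered again when the fault is retried.
	 */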
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		if (show_unhandled_signals &&
		    unhandled_signal(tsk, SIGSEGV) &&
		    __ratelimit(&ratelimit_state)) {
			pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
				tsk->comm,
				write ? "write access to" : "read access from",
				field, address);
			pr_info("epc = %0*lx in", field,
				(unsigned long) regs->cp0_epc);
			print_vma_addr(KERN_CONT " ", regs->cp0_epc);
			pr_cont("\n");
			pr_info("ra = %0*lx in", field,
				(unsigned long) regs->regs[31]);
			print_vma_addr(KERN_CONT " ", regs->regs[31]);
			pr_cont("\n");
		}
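		/* Record the exception code (CP0 Cause ExcCode, bits 6:2). */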
		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

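/*
 * Kernel-mode fault.  fixup_exception() looks the faulting EPC up in the
 * exception table (e.g. a fault inside copy_from_user()); if a fixup entry
 * exists, execution is redirected there instead of oopsing.
 */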
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}

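/*
 * Wrapper called from the low-level exception handler.  exception_enter()
 * and exception_exit() tell the context-tracking code (RCU/NO_HZ_FULL)
 * that the CPU has temporarily left user context, and __kprobes keeps the
 * fault path itself from being probed, which would otherwise recurse.
 */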
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	unsigned long write, unsigned long address)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, write, address);
	exception_exit(prev_state);
}