/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>

d98f8f05 | 34 | #ifdef CONFIG_KPROBES |
127cda1e | 35 | static inline int notify_page_fault(struct pt_regs *regs) |
d98f8f05 | 36 | { |
127cda1e DM |
37 | int ret = 0; |
38 | ||
39 | /* kprobe_running() needs smp_processor_id() */ | |
40 | if (!user_mode(regs)) { | |
41 | preempt_disable(); | |
42 | if (kprobe_running() && kprobe_fault_handler(regs, 0)) | |
43 | ret = 1; | |
44 | preempt_enable(); | |
45 | } | |
46 | return ret; | |
d98f8f05 AK |
47 | } |
48 | #else | |
127cda1e | 49 | static inline int notify_page_fault(struct pt_regs *regs) |
d98f8f05 | 50 | { |
127cda1e | 51 | return 0; |
d98f8f05 AK |
52 | } |
53 | #endif | |
54 | ||
/*
 * To debug the kernel by catching accesses to certain virtual or physical
 * addresses.  Mode = 0 selects physical watchpoints, mode = 1 selects
 * virtual watchpoints.  flags = VM_READ watches memread accesses,
 * flags = VM_WRITE watches memwrite accesses.  The caller passes in a
 * 64-bit aligned addr, with mask set to the bytes that need to be
 * watched.  This is only useful on a single-CPU machine for now.  After
 * the watchpoint is detected, the process causing it will be killed,
 * thus preventing an infinite loop.
 */
void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
{
        unsigned long lsubits;

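        /* Fetch the current LSU control register contents and clear
         * every watchpoint enable bit before reprogramming.
         */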
        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                             : "=r" (lsubits)
                             : "i" (ASI_LSU_CONTROL));
        lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
                     LSU_CONTROL_PR | LSU_CONTROL_VR |
                     LSU_CONTROL_PW | LSU_CONTROL_VW);

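        /* Write the watchpoint address into the D-MMU; VIRT_WATCHPOINT
         * vs. PHYS_WATCHPOINT selects the virtual or the physical
         * watchpoint address register.
         */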
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
                               "i" (ASI_DMMU));

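        /* Now re-enable: install the byte mask (the virtual-mode mask
         * field sits at bit 25, the physical one at bit 33) plus the
         * read and/or write enable bits for the chosen mode.
         */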
        lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
        if (flags & VM_READ)
                lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
        if (flags & VM_WRITE)
                lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (lsubits), "i" (ASI_LSU_CONTROL)
                             : "memory");
}

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        print_symbol("RPC: <%s>\n", regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, interrupts are disabled around the time that we look
 * at the pte; kswapd will have to wait for its SMP IPI response
 * from us before it can proceed.  vmtruncate likewise.  This saves
 * us from having to acquire the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;
        unsigned long pstate;

        if (pgd_none(*pgdp))
                goto outret;
        pudp = pud_offset(pgdp, tpc);
        if (pud_none(*pudp))
                goto outret;
        pmdp = pmd_offset(pudp, tpc);
        if (pmd_none(*pmdp))
                goto outret;

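        /* wrpr with a register and an immediate writes their XOR, so
         * this toggles PSTATE_IE off and disables interrupts; the
         * matching "wrpr %0, 0x0" below restores the saved value.
         */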
        /* This disables preemption for us as well. */
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        __asm__ __volatile__("wrpr %0, %1, %%pstate"
                             : : "r" (pstate), "i" (PSTATE_IE));
        ptep = pte_offset_map(pmdp, tpc);
        pte = *ptep;
        if (!pte_present(pte))
                goto out;

        pa  = (pte_pfn(pte) << PAGE_SHIFT);
        pa += (tpc & ~PAGE_MASK);

        /* Use phys bypass so we don't pollute dtlb/dcache. */
        __asm__ __volatile__("lduwa [%1] %2, %0"
                             : "=r" (insn)
                             : "r" (pa), "i" (ASI_PHYS_USE_EC));

out:
        pte_unmap(ptep);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
        return insn;
}

extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned int insn, int fault_code)
{
        siginfo_t info;

        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
        if (fault_code & FAULT_CODE_ITLB)
                info.si_addr = (void __user *) regs->tpc;
        else
                info.si_addr = (void __user *)
                        compute_effective_address(regs, insn, 0);
        info.si_trapno = 0;
        force_sig_info(sig, &info, current);
}

extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);

static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
                            unsigned int insn, unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (thus insn is zero), that
         * is fine.  We will just gun down the process with a signal
         * in that case.
         */

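        /* (insn & 0xc0800000) == 0xc0800000 matches the load/store-
         * alternate opcodes.  If bit 13 (the immediate bit) is set, the
         * ASI comes from the %asi field of %tstate (bits 31:24);
         * otherwise it is encoded in bits 12:5 of the instruction.  The
         * 0xf2 mask then picks out the no-fault ASIs: ASI_PNF (0x82),
         * ASI_SNF (0x83) and their little-endian variants (0x8a, 0x8b).
         */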
        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}

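/* Main entry point for page faults.  By the time we get here the
 * low-level trap handlers have already stored the faulting address
 * and a FAULT_CODE_* mask in the thread_info for us to pick up.
 */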
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code, fault;
        unsigned long address, mm_rss;

        fault_code = get_thread_fault_code();

        if (notify_page_fault(regs))
                return;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        return;
                }
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto intr_or_no_mm;

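        /* A 32-bit task's address space is confined to the low 4GB, so
         * only the low 32 bits of the user PC and fault address are
         * significant.
         */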
        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV))
                        regs->tpc &= 0xffffffff;
                address &= 0xffffffff;
        }

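        /* If mmap_sem is contended and this is a kernel-mode fault with
         * no exception-table fixup, bail out via the kernel fault path
         * now rather than risk deadlocking on a semaphore the faulting
         * code may itself hold.
         */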
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault causing
         * load/store/atomic was a write or not, it only says that there
         * was no match.  So in such a case we (carefully) read the
         * instruction to try and figure this out.  It's an optimization
         * so it's ok if we can't do this.
         *
         * Special hack, window spill/fill knows the exact fault type.
         */
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took an ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                BUG_ON(address != regs->tpc);
                BUG_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

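        /* The protection checks passed, so hand off to the generic VM
         * layer to instantiate the page; the final argument tells it
         * whether this access was a write, so copy-on-write can be
         * resolved.
         */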
        fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;

        up_read(&mm->mmap_sem);

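        /* The TSB is sized against the resident set.  Hugepage
         * translations live in a separate TSB, so their pages are
         * subtracted from the base count and checked against the huge
         * TSB's own RSS limit below.
         */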
        mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
        mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
        mm_rss = mm->context.huge_pte_count;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        return;

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);
        printk("VM: killing process %s\n", current->comm);
        if (!(regs->tstate & TSTATE_PRIV))
                do_group_exit(SIGKILL);
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}