/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>	/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

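/*
 * Software replacement index for the ITLB/DTLB: used by
 * update_mmu_cache() below as the next slot to overwrite when no
 * invalid entry is free.  On SMP each CPU keeps its own index.
 */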
#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to the appropriate routine.
 *
 * ARGUMENTS:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION		1
#define ACE_WRITE		2
#define ACE_USERMODE		4
#define ACE_INSTRUCTION		8
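/*
 * For example, error_code == (ACE_WRITE | ACE_USERMODE) is a user-mode
 * write to a not-present page; ACE_PROTECTION would be set as well if
 * the page were present but write-protected.
 */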

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
	unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page, addr;
	int write;
	int fault;
	siginfo_t info;

	/*
	 * If the BPSW IE bit was set, re-enable interrupts
	 * (set the PSW IE bit).
	 */
	if (regs->psw & M32R_PSW_BIE)
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space,
	 * i.e. (error_code & ACE_USERMODE) == 0.
	 */
	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
		goto vmalloc_fault;

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context, or are
	 * running in an atomic region, then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & ACE_USERMODE) == 0 &&
		    !search_exception_tables(regs->psw))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (error_code & ACE_USERMODE) {
		/*
		 * accessing the stack below "spu" is always a bug.
		 * The "+ 4" is there due to the push instruction
		 * doing pre-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 4 < regs->spu)
			goto bad_area;
	}

	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
		default:	/* 3: write, present */
			/* fall through */
		case ACE_WRITE:	/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case ACE_PROTECTION:	/* read, present */
		case 0:			/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * For an instruction access exception, check that the area is
	 * executable.
	 */
	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
		goto bad_area;

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	addr = (address & PAGE_MASK);
	set_thread_fault_code(error_code);
	fault = handle_mm_fault(mm, vma, addr, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
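	/*
	 * The fault has been handled; account it against the task as a
	 * major fault (one that needed I/O) or a minor one.
	 */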
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	set_thread_fault_code(0);
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & ACE_USERMODE) {
		tsk->thread.address = address;
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT " printing bpc:\n");
	printk("%08lx\n", regs->bpc);
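	/*
	 * Walk the page tables by hand: MPTB holds the page-table base,
	 * so print the pde for the faulting address and, if it is
	 * present, the pte as well.
	 */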
	page = *(unsigned long *)MPTB;
	page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
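	/*
	 * init must not be killed for running out of memory: yield so
	 * that other tasks can free pages, then retry the fault from
	 * the "survive" label above.
	 */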
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & ACE_USERMODE)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exception or die */
	if (!(error_code & ACE_USERMODE))
		goto no_context;

	tsk->thread.address = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)*(unsigned long *)MPTB;
		pgd = offset + (pgd_t *)pgd;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		addr = (address & PAGE_MASK);
		set_thread_fault_code(error_code);
		update_mmu_cache(NULL, addr, *pte_k);
		set_thread_fault_code(0);
		return;
	}
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK	(NR_TLB_ENTRIES - 1)
#define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
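/*
 * Each TLB entry is two words long (tag word, then data word), so a
 * TLB of NR_TLB_ENTRIES entries spans NR_TLB_ENTRIES * 8 bytes.
 */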
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
	pte_t pte)
{
	volatile unsigned long *entry1, *entry2;
	unsigned long pte_data, flags;
	unsigned int *entry_dat;
	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
	int i;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr = (vaddr & PAGE_MASK) | get_asid();

	pte_data = pte_val(pte);

#ifdef CONFIG_CHIP_OPSP
	entry1 = (unsigned long *)ITLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry1++ == vaddr) {
			set_tlb_data(entry1, pte_data);
			break;
		}
		entry1++;
	}
	entry2 = (unsigned long *)DTLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry2++ == vaddr) {
			set_tlb_data(entry2, pte_data);
			break;
		}
		entry2++;
	}
#else
	/*
	 * Update TLB entries
	 *  entry1: ITLB entry address
	 *  entry2: DTLB entry address
	 */
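	/*
	 * As the operand names suggest, the sequence below writes the
	 * tagged vaddr to the MSVA register, starts a hardware TLB
	 * index search by storing 1 to MTOP, polls MTOP until it reads
	 * zero, then fetches the resulting ITLB/DTLB entry addresses
	 * from MIDXI/MIDXD and stores pte_data into each entry's data
	 * word (the "@+" pre-increment skips past the tag word).
	 */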
	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"
		"st	%2, @(%5, %0)	\n\t"
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"
		"add3	r4, %0, %7	\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%1, @(%6, %0)	\n\t"
		"bnez	%1, 1b		\n\t"
		"ld	%0, @r4+	\n\t"
		"ld	%1, @r4		\n\t"
		"st	%3, @+%0	\n\t"
		"st	%3, @+%1	\n\t"
		: "=&r" (entry1), "=&r" (entry2)
		: "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
		  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
		: "r4", "memory"
	);
#endif

	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
		goto notfound;

found:
	local_irq_restore(flags);

	return;

	/* Valid entry not found */
notfound:
	/*
	 * Update ITLB or DTLB entry
	 *  entry1: TLB entry address
	 *  entry2: TLB base address
	 */
	if (!inst) {
		entry2 = (unsigned long *)DTLB_BASE;
		entry_dat = &tlb_entry_d;
	} else {
		entry2 = (unsigned long *)ITLB_BASE;
		entry_dat = &tlb_entry_i;
	}
	entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

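	/*
	 * Scan backwards from the last slot this routine wrote, looking
	 * for an entry whose valid bit is clear.  If every entry is
	 * valid, fall back to round-robin replacement: overwrite the
	 * slot *entry_dat names and advance the index.  Entries are two
	 * words each, hence the "<< 1" stepping.
	 */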
	for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
		if (!(entry1[1] & 2))	/* Valid bit check */
			break;

		if (entry1 != entry2)
			entry1 -= 2;
		else
			entry1 += TLB_MASK << 1;
	}

	if (i >= NR_TLB_ENTRIES) {	/* Empty entry not found */
		entry1 = entry2 + (*entry_dat << 1);
		*entry_dat = (*entry_dat + 1) & TLB_MASK;
	}
	*entry1++ = vaddr;	/* Set TLB tag */
	set_tlb_data(entry1, pte_data);

	goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		page &= PAGE_MASK;
		page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(page);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
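		/*
		 * Heuristic: once the range covers more than a quarter
		 * of the TLB, dropping the whole context (forcing a new
		 * ASID) is cheaper than flushing page by page.
		 */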
		if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
			mm_context(mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;

			asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			start |= asid;
			end |= asid;
			while (start < end) {
				__flush_tlb_page(start);
				start += PAGE_SIZE;
			}
		}
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process: instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm_context(mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
	tlb_entry_i = 0;
	tlb_entry_d = 0;
	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
	*(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}