/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

/*
 * This routine is analogous to expand_stack() but instead grows the
 * register backing store (which grows towards higher addresses).
 * Since the register backing store is accessed sequentially, we
 * disallow growing it by more than a page at a time.  Note that the
 * VM_GROWSUP flag can be set on any VM area, but that's fine because
 * the total process size is still limited by RLIMIT_STACK and
 * RLIMIT_AS.
 */
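/*
 * Note: "grow" below is counted in pages; PAGE_SIZE >> PAGE_SHIFT is
 * simply 1, matching the page-granular total_vm and locked_vm counters,
 * while vm_end itself advances by PAGE_SIZE bytes.
 */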
static inline long
expand_backing_store (struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

	grow = PAGE_SIZE >> PAGE_SHIFT;
	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
		return -ENOMEM;
	vma->vm_end += PAGE_SIZE;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
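/*
 * The lookup below walks the kernel page tables by hand, one level at a
 * time (pgd -> pud -> pmd -> pte); a missing or bad entry at any level
 * means the page cannot be present.
 */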
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

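	/*
	 * Note that handle_mm_fault() below may sleep, which is forbidden
	 * in atomic context.
	 */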
	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
		       SIGSEGV) == NOTIFY_STOP)
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

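	/*
	 * Build the required-permission mask straight from the ISR
	 * execute/write/read fault bits; each one maps onto the matching
	 * VM_EXEC/VM_WRITE/VM_READ flag via the bit positions defined
	 * above.
	 */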
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	case VM_FAULT_SIGBUS:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;

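	/*
	 * The faulting address fell below the vma returned by
	 * find_vma_prev().  Either it extends the previous vma's register
	 * backing store upwards (address == prev_vma->vm_end with
	 * VM_GROWSUP set), or it grows a VM_GROWSDOWN stack vma downwards;
	 * anything else is a genuine bad area.
	 */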
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_backing_store(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
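	/*
	 * Genuine user-level fault: deliver the signal (SIGSEGV, or SIGBUS
	 * from the VM_FAULT_SIGBUS case above) with a fully filled-in
	 * siginfo, including the ia64-specific ISR value so user handlers
	 * can decode the fault.
	 */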
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale.  If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

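	/*
	 * If the faulting instruction has an exception-table fixup (as
	 * user-copy routines do), ia64_done_with_exception() transfers
	 * control to the fixup code and the fault is handled.
	 */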
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

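	/*
	 * Out of memory: init (pid 1) must not be killed, so let it yield
	 * the CPU, retake mmap_sem, and retry the fault.  Any other task
	 * is killed if the fault came from user mode; kernel-mode faults
	 * fall through to the no_context path above.
	 */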
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}