// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
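
/*
 * Note: this is the generic five-level walk (pgd -> p4d -> pud -> pmd ->
 * pte); levels that are folded away on this configuration collapse into
 * no-ops. The only caller is the no_context path below, which uses it to
 * filter out faults on region-5 kernel addresses whose stale VHPT
 * translation has already been purged.
 */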
60 | ||
f28fa729 KC |
61 | # define VM_READ_BIT 0 |
62 | # define VM_WRITE_BIT 1 | |
63 | # define VM_EXEC_BIT 2 | |
64 | ||
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

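	/*
	 * Decode the ISR: the X (execute) and W (write) bits say what kind
	 * of access faulted; shift them into the VM_* bit positions so that
	 * "mask" can be compared directly against vma->vm_flags below. For
	 * example, a store fault yields mask == VM_WRITE.
	 */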
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * Handle kprobes on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
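	/*
	 * FAULT_FLAG_USER tells the core fault handler that the fault
	 * originated in user mode (used, e.g., for memcg OOM handling);
	 * FAULT_FLAG_WRITE requests write access, so handle_mm_fault()
	 * will break COW where needed.
	 */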

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns the vma such that address < vma->vm_end, or NULL.
	 *
	 * It may find no vma at all: the faulting address can lie above the
	 * last vm area when that area is the register backing store, which
	 * needs to expand upwards. In that case vma will be NULL but
	 * prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;
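	/*
	 * Case summary (a sketch of the dispatch above):
	 *   vma covers address                -> fall through to good_area
	 *   vma above address (gap below it)  -> check_expansion, a stack
	 *                                        may grow down into the gap
	 *   no vma, prev_vma below address    -> check_expansion, register
	 *                                        backing store may grow up
	 */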
115 | ||
116 | good_area: | |
117 | code = SEGV_ACCERR; | |
118 | ||
119 | /* OK, we've got a good vm_area for this memory area. Check the access permissions: */ | |
120 | ||
1da177e4 LT |
121 | # if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \ |
122 | || (1 << VM_EXEC_BIT) != VM_EXEC) | |
123 | # error File is out of sync with <linux/mm.h>. Please update. | |
124 | # endif | |
125 | ||
df67b3da JB |
126 | if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) |
127 | goto bad_area; | |
128 | ||
1da177e4 LT |
129 | if ((vma->vm_flags & mask) != mask) |
130 | goto bad_area; | |
131 | ||
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;
141 | ||
83c54070 | 142 | if (unlikely(fault & VM_FAULT_ERROR)) { |
1da177e4 LT |
143 | /* |
144 | * We ran out of memory, or some other thing happened | |
145 | * to us that made us unable to handle the page fault | |
146 | * gracefully. | |
147 | */ | |
83c54070 NP |
148 | if (fault & VM_FAULT_OOM) { |
149 | goto out_of_memory; | |
33692f27 LT |
150 | } else if (fault & VM_FAULT_SIGSEGV) { |
151 | goto bad_area; | |
83c54070 NP |
152 | } else if (fault & VM_FAULT_SIGBUS) { |
153 | signal = SIGBUS; | |
154 | goto bad_area; | |
155 | } | |
1da177e4 LT |
156 | BUG(); |
157 | } | |

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}
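	/*
	 * Retry semantics (as set up via FAULT_FLAG_DEFAULT above):
	 * VM_FAULT_RETRY means the core handler dropped mmap_lock, e.g. to
	 * wait for page I/O; we mark the fault FAULT_FLAG_TRIED and take
	 * the whole fault over again from "retry".
	 */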
171 | ||
d8ed45c5 | 172 | mmap_read_unlock(mm); |
1da177e4 LT |
173 | return; |
174 | ||
175 | check_expansion: | |
176 | if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { | |
e8c59c0c AB |
177 | if (!vma) |
178 | goto bad_area; | |
1da177e4 LT |
179 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
180 | goto bad_area; | |
181 | if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) | |
182 | || REGION_OFFSET(address) >= RGN_MAP_LIMIT) | |
183 | goto bad_area; | |
184 | if (expand_stack(vma, address)) | |
185 | goto bad_area; | |
186 | } else { | |
187 | vma = prev_vma; | |
188 | if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) | |
189 | || REGION_OFFSET(address) >= RGN_MAP_LIMIT) | |
190 | goto bad_area; | |
46dea3d0 HD |
191 | /* |
192 | * Since the register backing store is accessed sequentially, | |
193 | * we disallow growing it by more than a page at a time. | |
194 | */ | |
195 | if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) | |
196 | goto bad_area; | |
197 | if (expand_upwards(vma, address)) | |
1da177e4 LT |
198 | goto bad_area; |
199 | } | |
200 | goto good_area; | |
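	/*
	 * Two expansion directions are handled above: an ordinary stack vma
	 * (VM_GROWSDOWN) is grown downwards with expand_stack(), while the
	 * ia64 register backing store (VM_GROWSUP, filled sequentially by
	 * the RSE) is grown upwards one page at a time with
	 * expand_upwards(). Either way, the address must stay inside the
	 * vma's region and below RGN_MAP_LIMIT.
	 */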
201 | ||
202 | bad_area: | |
d8ed45c5 | 203 | mmap_read_unlock(mm); |
1da177e4 LT |
204 | if ((isr & IA64_ISR_SP) |
205 | || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) | |
206 | { | |
207 | /* | |
208 | * This fault was due to a speculative load or lfetch.fault, set the "ed" | |
209 | * bit in the psr to ensure forward progress. (Target register will get a | |
210 | * NaT for ld.s, lfetch will be canceled.) | |
211 | */ | |
212 | ia64_psr(regs)->ed = 1; | |
213 | return; | |
214 | } | |
215 | if (user_mode(regs)) { | |
a618a275 | 216 | force_sig_fault(signal, code, (void __user *) address, |
2e1661d2 | 217 | 0, __ISR_VALID, isr); |
1da177e4 LT |
218 | return; |
219 | } | |
220 | ||
221 | no_context: | |
f0a8d3c9 TL |
222 | if ((isr & IA64_ISR_SP) |
223 | || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) | |
224 | { | |
1da177e4 | 225 | /* |
f0a8d3c9 TL |
226 | * This fault was due to a speculative load or lfetch.fault, set the "ed" |
227 | * bit in the psr to ensure forward progress. (Target register will get a | |
228 | * NaT for ld.s, lfetch will be canceled.) | |
1da177e4 LT |
229 | */ |
230 | ia64_psr(regs)->ed = 1; | |
231 | return; | |
232 | } | |
233 | ||
1da177e4 LT |
234 | /* |
235 | * Since we have no vma's for region 5, we might get here even if the address is | |
236 | * valid, due to the VHPT walker inserting a non present translation that becomes | |
237 | * stale. If that happens, the non present fault handler already purged the stale | |
238 | * translation, which fixed the problem. So, we check to see if the translation is | |
239 | * valid, and return if it is. | |
240 | */ | |
241 | if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address)) | |
242 | return; | |
243 | ||
	if (ia64_done_with_exception(regs))
		return;
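	/*
	 * ia64_done_with_exception() consults the kernel exception tables;
	 * if the faulting instruction has a fixup entry (e.g. a user-copy
	 * accessor), control is redirected there and the fault is done.
	 */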
246 | ||
1da177e4 LT |
247 | /* |
248 | * Oops. The kernel tried to access some bad page. We'll have to terminate things | |
249 | * with extreme prejudice. | |
250 | */ | |
251 | bust_spinlocks(1); | |
252 | ||
253 | if (address < PAGE_SIZE) | |
254 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address); | |
255 | else | |
256 | printk(KERN_ALERT "Unable to handle kernel paging request at " | |
257 | "virtual address %016lx\n", address); | |
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;
264 | ||
265 | out_of_memory: | |
d8ed45c5 | 266 | mmap_read_unlock(mm); |
0c3b96e4 NP |
267 | if (!user_mode(regs)) |
268 | goto no_context; | |
269 | pagefault_out_of_memory(); | |
1da177e4 | 270 | } |