/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found	1: protection fault
 *   bit 1 == 0: read access	1: write access
 *   bit 2 == 0: kernel-mode access	1: user-mode access
 *   bit 3 ==			1: use of reserved bit detected
 *   bit 4 ==			1: fault was an instruction fetch
 *   bit 5 ==			1: protection keys block access
 */
enum x86_pf_error_code {

	PF_PROT		=	1 << 0,
	PF_WRITE	=	1 << 1,
	PF_USER		=	1 << 2,
	PF_RSVD		=	1 << 3,
	PF_INSTR	=	1 << 4,
	PF_PK		=	1 << 5,
};
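
/*
 * For example: a user-mode write to a present but read-only page
 * faults with error_code == (PF_PROT | PF_WRITE | PF_USER) == 0x7,
 * while a user-mode read of an unmapped address faults with just
 * PF_USER == 0x4.
 */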

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
139 | ||
2d4a7167 IM |
140 | static int |
141 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | |
33cb5243 | 142 | { |
2d4a7167 | 143 | unsigned char *max_instr; |
ab2bf0c1 | 144 | unsigned char *instr; |
33cb5243 | 145 | int prefetch = 0; |
1da177e4 | 146 | |
3085354d IM |
147 | /* |
148 | * If it was a exec (instruction fetch) fault on NX page, then | |
149 | * do not ignore the fault: | |
150 | */ | |
66c58156 | 151 | if (error_code & PF_INSTR) |
1da177e4 | 152 | return 0; |
1dc85be0 | 153 | |
107a0367 | 154 | instr = (void *)convert_ip_to_linear(current, regs); |
f1290ec9 | 155 | max_instr = instr + 15; |
1da177e4 | 156 | |
d31bf07f | 157 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) |
1da177e4 LT |
158 | return 0; |
159 | ||
107a0367 | 160 | while (instr < max_instr) { |
2d4a7167 | 161 | unsigned char opcode; |
1da177e4 | 162 | |
ab2bf0c1 | 163 | if (probe_kernel_address(instr, opcode)) |
33cb5243 | 164 | break; |
1da177e4 | 165 | |
1da177e4 LT |
166 | instr++; |
167 | ||
107a0367 | 168 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
1da177e4 | 169 | break; |
1da177e4 LT |
170 | } |
171 | return prefetch; | |
172 | } | |
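
/*
 * For example, "prefetchnta (%rax)" encodes as 0f 18 00: the loop in
 * is_prefetch() reads the 0x0F byte, then check_prefetch_opcode()
 * sees instr_lo == 0xF, reads the following 0x18 byte, and flags the
 * faulting access as a prefetch whose bogus exception can be ignored.
 */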
173 | ||
019132ff DH |
174 | /* |
175 | * A protection key fault means that the PKRU value did not allow | |
176 | * access to some PTE. Userspace can figure out what PKRU was | |
177 | * from the XSAVE state, and this function fills out a field in | |
178 | * siginfo so userspace can discover which protection key was set | |
179 | * on the PTE. | |
180 | * | |
181 | * If we get here, we know that the hardware signaled a PF_PK | |
182 | * fault and that there was a VMA once we got in the fault | |
183 | * handler. It does *not* guarantee that the VMA we find here | |
184 | * was the one that we faulted on. | |
185 | * | |
186 | * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); | |
187 | * 2. T1 : set PKRU to deny access to pkey=4, touches page | |
188 | * 3. T1 : faults... | |
189 | * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); | |
190 | * 5. T1 : enters fault handler, takes mmap_sem, etc... | |
191 | * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really | |
192 | * faulted on a pte with its pkey=4. | |
193 | */ | |
194 | static void fill_sig_info_pkey(int si_code, siginfo_t *info, | |
195 | struct vm_area_struct *vma) | |
196 | { | |
197 | /* This is effectively an #ifdef */ | |
198 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) | |
199 | return; | |
200 | ||
201 | /* Fault not from Protection Keys: nothing to do */ | |
202 | if (si_code != SEGV_PKUERR) | |
203 | return; | |
204 | /* | |
205 | * force_sig_info_fault() is called from a number of | |
206 | * contexts, some of which have a VMA and some of which | |
207 | * do not. The PF_PK handing happens after we have a | |
208 | * valid VMA, so we should never reach this without a | |
209 | * valid VMA. | |
210 | */ | |
211 | if (!vma) { | |
212 | WARN_ONCE(1, "PKU fault with no VMA passed in"); | |
213 | info->si_pkey = 0; | |
214 | return; | |
215 | } | |
216 | /* | |
217 | * si_pkey should be thought of as a strong hint, but not | |
218 | * absolutely guranteed to be 100% accurate because of | |
219 | * the race explained above. | |
220 | */ | |
221 | info->si_pkey = vma_pkey(vma); | |
222 | } | |
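
/*
 * Userspace consumes si_pkey from the resulting SIGSEGV siginfo; a
 * handler along these lines (an illustrative sketch, not part of the
 * kernel) would see which key blocked the access:
 *
 *	void segv_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == SEGV_PKUERR)
 *			fprintf(stderr, "blocked by pkey %d\n",
 *				(int)si->si_pkey);
 *	}
 */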
223 | ||
2d4a7167 IM |
224 | static void |
225 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, | |
7b2d0dba DH |
226 | struct task_struct *tsk, struct vm_area_struct *vma, |
227 | int fault) | |
c4aba4a8 | 228 | { |
f672b49b | 229 | unsigned lsb = 0; |
c4aba4a8 HH |
230 | siginfo_t info; |
231 | ||
2d4a7167 IM |
232 | info.si_signo = si_signo; |
233 | info.si_errno = 0; | |
234 | info.si_code = si_code; | |
235 | info.si_addr = (void __user *)address; | |
f672b49b AK |
236 | if (fault & VM_FAULT_HWPOISON_LARGE) |
237 | lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); | |
238 | if (fault & VM_FAULT_HWPOISON) | |
239 | lsb = PAGE_SHIFT; | |
240 | info.si_addr_lsb = lsb; | |
2d4a7167 | 241 | |
019132ff DH |
242 | fill_sig_info_pkey(si_code, &info, vma); |
243 | ||
c4aba4a8 HH |
244 | force_sig_info(si_signo, &info, tsk); |
245 | } | |
246 | ||
f2f13a85 IM |
247 | DEFINE_SPINLOCK(pgd_lock); |
248 | LIST_HEAD(pgd_list); | |
249 | ||
250 | #ifdef CONFIG_X86_32 | |
251 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | |
33cb5243 | 252 | { |
f2f13a85 IM |
253 | unsigned index = pgd_index(address); |
254 | pgd_t *pgd_k; | |
255 | pud_t *pud, *pud_k; | |
256 | pmd_t *pmd, *pmd_k; | |
2d4a7167 | 257 | |
f2f13a85 IM |
258 | pgd += index; |
259 | pgd_k = init_mm.pgd + index; | |
260 | ||
261 | if (!pgd_present(*pgd_k)) | |
262 | return NULL; | |
263 | ||
264 | /* | |
265 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | |
266 | * and redundant with the set_pmd() on non-PAE. As would | |
267 | * set_pud. | |
268 | */ | |
269 | pud = pud_offset(pgd, address); | |
270 | pud_k = pud_offset(pgd_k, address); | |
271 | if (!pud_present(*pud_k)) | |
272 | return NULL; | |
273 | ||
274 | pmd = pmd_offset(pud, address); | |
275 | pmd_k = pmd_offset(pud_k, address); | |
276 | if (!pmd_present(*pmd_k)) | |
277 | return NULL; | |
278 | ||
b8bcfe99 | 279 | if (!pmd_present(*pmd)) |
f2f13a85 | 280 | set_pmd(pmd, *pmd_k); |
b8bcfe99 | 281 | else |
f2f13a85 | 282 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); |
f2f13a85 IM |
283 | |
284 | return pmd_k; | |
285 | } | |
286 | ||
287 | void vmalloc_sync_all(void) | |
288 | { | |
289 | unsigned long address; | |
290 | ||
291 | if (SHARED_KERNEL_PMD) | |
292 | return; | |
293 | ||
294 | for (address = VMALLOC_START & PMD_MASK; | |
dc4fac84 | 295 | address >= TASK_SIZE_MAX && address < FIXADDR_TOP; |
f2f13a85 | 296 | address += PMD_SIZE) { |
f2f13a85 IM |
297 | struct page *page; |
298 | ||
a79e53d8 | 299 | spin_lock(&pgd_lock); |
f2f13a85 | 300 | list_for_each_entry(page, &pgd_list, lru) { |
617d34d9 | 301 | spinlock_t *pgt_lock; |
f01f7c56 | 302 | pmd_t *ret; |
617d34d9 | 303 | |
a79e53d8 | 304 | /* the pgt_lock only for Xen */ |
617d34d9 JF |
305 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
306 | ||
307 | spin_lock(pgt_lock); | |
308 | ret = vmalloc_sync_one(page_address(page), address); | |
309 | spin_unlock(pgt_lock); | |
310 | ||
311 | if (!ret) | |
f2f13a85 IM |
312 | break; |
313 | } | |
a79e53d8 | 314 | spin_unlock(&pgd_lock); |
f2f13a85 IM |
315 | } |
316 | } | |
317 | ||
318 | /* | |
319 | * 32-bit: | |
320 | * | |
321 | * Handle a fault on the vmalloc or module mapping area | |
322 | */ | |
9326638c | 323 | static noinline int vmalloc_fault(unsigned long address) |
f2f13a85 IM |
324 | { |
325 | unsigned long pgd_paddr; | |
326 | pmd_t *pmd_k; | |
327 | pte_t *pte_k; | |
328 | ||
329 | /* Make sure we are in vmalloc area: */ | |
330 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | |
331 | return -1; | |
332 | ||
ebc8827f FW |
333 | WARN_ON_ONCE(in_nmi()); |
334 | ||
f2f13a85 IM |
335 | /* |
336 | * Synchronize this task's top level page-table | |
337 | * with the 'reference' page table. | |
338 | * | |
339 | * Do _not_ use "current" here. We might be inside | |
340 | * an interrupt in the middle of a task switch.. | |
341 | */ | |
342 | pgd_paddr = read_cr3(); | |
343 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | |
344 | if (!pmd_k) | |
345 | return -1; | |
346 | ||
f4eafd8b TK |
347 | if (pmd_huge(*pmd_k)) |
348 | return 0; | |
349 | ||
f2f13a85 IM |
350 | pte_k = pte_offset_kernel(pmd_k, address); |
351 | if (!pte_present(*pte_k)) | |
352 | return -1; | |
353 | ||
354 | return 0; | |
355 | } | |
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}
411 | ||
412 | #else /* CONFIG_X86_64: */ | |
413 | ||
414 | void vmalloc_sync_all(void) | |
415 | { | |
9661d5bc | 416 | sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0); |
f2f13a85 IM |
417 | } |
418 | ||
419 | /* | |
420 | * 64-bit: | |
421 | * | |
422 | * Handle a fault on the vmalloc area | |
f2f13a85 | 423 | */ |
9326638c | 424 | static noinline int vmalloc_fault(unsigned long address) |
f2f13a85 IM |
425 | { |
426 | pgd_t *pgd, *pgd_ref; | |
427 | pud_t *pud, *pud_ref; | |
428 | pmd_t *pmd, *pmd_ref; | |
429 | pte_t *pte, *pte_ref; | |
430 | ||
431 | /* Make sure we are in vmalloc area: */ | |
432 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | |
433 | return -1; | |
434 | ||
ebc8827f FW |
435 | WARN_ON_ONCE(in_nmi()); |
436 | ||
f2f13a85 IM |
437 | /* |
438 | * Copy kernel mappings over when needed. This can also | |
439 | * happen within a race in page table update. In the later | |
440 | * case just flush: | |
441 | */ | |
46aea387 | 442 | pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address); |
f2f13a85 IM |
443 | pgd_ref = pgd_offset_k(address); |
444 | if (pgd_none(*pgd_ref)) | |
445 | return -1; | |
446 | ||
1160c277 | 447 | if (pgd_none(*pgd)) { |
f2f13a85 | 448 | set_pgd(pgd, *pgd_ref); |
1160c277 SK |
449 | arch_flush_lazy_mmu_mode(); |
450 | } else { | |
f2f13a85 | 451 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); |
1160c277 | 452 | } |
f2f13a85 IM |
453 | |
454 | /* | |
455 | * Below here mismatches are bugs because these lower tables | |
456 | * are shared: | |
457 | */ | |
458 | ||
459 | pud = pud_offset(pgd, address); | |
460 | pud_ref = pud_offset(pgd_ref, address); | |
461 | if (pud_none(*pud_ref)) | |
462 | return -1; | |
463 | ||
f4eafd8b | 464 | if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) |
f2f13a85 IM |
465 | BUG(); |
466 | ||
f4eafd8b TK |
467 | if (pud_huge(*pud)) |
468 | return 0; | |
469 | ||
f2f13a85 IM |
470 | pmd = pmd_offset(pud, address); |
471 | pmd_ref = pmd_offset(pud_ref, address); | |
472 | if (pmd_none(*pmd_ref)) | |
473 | return -1; | |
474 | ||
f4eafd8b | 475 | if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) |
f2f13a85 IM |
476 | BUG(); |
477 | ||
f4eafd8b TK |
478 | if (pmd_huge(*pmd)) |
479 | return 0; | |
480 | ||
f2f13a85 IM |
481 | pte_ref = pte_offset_kernel(pmd_ref, address); |
482 | if (!pte_present(*pte_ref)) | |
483 | return -1; | |
484 | ||
485 | pte = pte_offset_kernel(pmd, address); | |
486 | ||
487 | /* | |
488 | * Don't use pte_page here, because the mappings can point | |
489 | * outside mem_map, and the NUMA hash lookup cannot handle | |
490 | * that: | |
491 | */ | |
492 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | |
493 | BUG(); | |
494 | ||
495 | return 0; | |
496 | } | |
9326638c | 497 | NOKPROBE_SYMBOL(vmalloc_fault); |
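
/*
 * The typical case the 64-bit vmalloc_fault() handles: a task's pgd
 * was allocated before a vmalloc() populated a new top-level entry
 * in init_mm.pgd.  The first touch of that region from the task
 * faults, the single pgd entry is copied over above, and the access
 * is retried; everything below the pgd level is shared with the
 * reference page tables.
 */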

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
523 | ||
524 | static void dump_pagetable(unsigned long address) | |
525 | { | |
087975b0 AM |
526 | pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); |
527 | pgd_t *pgd = base + pgd_index(address); | |
1da177e4 LT |
528 | pud_t *pud; |
529 | pmd_t *pmd; | |
530 | pte_t *pte; | |
531 | ||
2d4a7167 IM |
532 | if (bad_address(pgd)) |
533 | goto bad; | |
534 | ||
d646bce4 | 535 | printk("PGD %lx ", pgd_val(*pgd)); |
2d4a7167 IM |
536 | |
537 | if (!pgd_present(*pgd)) | |
538 | goto out; | |
1da177e4 | 539 | |
d2ae5b5f | 540 | pud = pud_offset(pgd, address); |
2d4a7167 IM |
541 | if (bad_address(pud)) |
542 | goto bad; | |
543 | ||
1da177e4 | 544 | printk("PUD %lx ", pud_val(*pud)); |
b5360222 | 545 | if (!pud_present(*pud) || pud_large(*pud)) |
2d4a7167 | 546 | goto out; |
1da177e4 LT |
547 | |
548 | pmd = pmd_offset(pud, address); | |
2d4a7167 IM |
549 | if (bad_address(pmd)) |
550 | goto bad; | |
551 | ||
1da177e4 | 552 | printk("PMD %lx ", pmd_val(*pmd)); |
2d4a7167 IM |
553 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
554 | goto out; | |
1da177e4 LT |
555 | |
556 | pte = pte_offset_kernel(pmd, address); | |
2d4a7167 IM |
557 | if (bad_address(pte)) |
558 | goto bad; | |
559 | ||
33cb5243 | 560 | printk("PTE %lx", pte_val(*pte)); |
2d4a7167 | 561 | out: |
1da177e4 LT |
562 | printk("\n"); |
563 | return; | |
564 | bad: | |
565 | printk("BAD\n"); | |
8c938f9f IM |
566 | } |
567 | ||
f2f13a85 | 568 | #endif /* CONFIG_X86_64 */ |

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
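
/*
 * Concretely: kernel text lives at sign-extended addresses such as
 * 0xffffffff81000000.  When the erratum truncates RIP to its low 32
 * bits we fault on 0x81000000; OR-ing the upper bits back in above
 * lands inside _stext.._etext, so regs->ip is repaired and execution
 * resumes.
 */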

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}
624 | ||
29caf2f9 HH |
625 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
626 | { | |
627 | #ifdef CONFIG_X86_F00F_BUG | |
628 | unsigned long nr; | |
2d4a7167 | 629 | |
29caf2f9 | 630 | /* |
2d4a7167 | 631 | * Pentium F0 0F C7 C8 bug workaround: |
29caf2f9 | 632 | */ |
e2604b49 | 633 | if (boot_cpu_has_bug(X86_BUG_F00F)) { |
29caf2f9 HH |
634 | nr = (address - idt_descr.address) >> 3; |
635 | ||
636 | if (nr == 6) { | |
637 | do_invalid_op(regs, 0); | |
638 | return 1; | |
639 | } | |
640 | } | |
641 | #endif | |
642 | return 0; | |
643 | } | |
644 | ||
8f766149 IM |
645 | static const char nx_warning[] = KERN_CRIT |
646 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; | |
eff50c34 JK |
647 | static const char smep_warning[] = KERN_CRIT |
648 | "unable to execute userspace code (SMEP?) (uid: %d)\n"; | |
8f766149 | 649 | |
2d4a7167 IM |
650 | static void |
651 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | |
652 | unsigned long address) | |
b3279c7f | 653 | { |
1156e098 HH |
654 | if (!oops_may_print()) |
655 | return; | |
656 | ||
1156e098 | 657 | if (error_code & PF_INSTR) { |
93809be8 | 658 | unsigned int level; |
426e34cc MF |
659 | pgd_t *pgd; |
660 | pte_t *pte; | |
2d4a7167 | 661 | |
426e34cc MF |
662 | pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK); |
663 | pgd += pgd_index(address); | |
664 | ||
665 | pte = lookup_address_in_pgd(pgd, address, &level); | |
1156e098 | 666 | |
8f766149 | 667 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
078de5f7 | 668 | printk(nx_warning, from_kuid(&init_user_ns, current_uid())); |
eff50c34 JK |
669 | if (pte && pte_present(*pte) && pte_exec(*pte) && |
670 | (pgd_flags(*pgd) & _PAGE_USER) && | |
1e02ce4c | 671 | (__read_cr4() & X86_CR4_SMEP)) |
eff50c34 | 672 | printk(smep_warning, from_kuid(&init_user_ns, current_uid())); |
1156e098 | 673 | } |
1156e098 | 674 | |
19f0dda9 | 675 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
b3279c7f | 676 | if (address < PAGE_SIZE) |
19f0dda9 | 677 | printk(KERN_CONT "NULL pointer dereference"); |
b3279c7f | 678 | else |
19f0dda9 | 679 | printk(KERN_CONT "paging request"); |
2d4a7167 | 680 | |
f294a8ce | 681 | printk(KERN_CONT " at %p\n", (void *) address); |
19f0dda9 | 682 | printk(KERN_ALERT "IP:"); |
5f01c988 | 683 | printk_address(regs->ip); |
2d4a7167 | 684 | |
b3279c7f HH |
685 | dump_pagetable(address); |
686 | } | |
687 | ||
2d4a7167 IM |
688 | static noinline void |
689 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, | |
690 | unsigned long address) | |
1da177e4 | 691 | { |
2d4a7167 IM |
692 | struct task_struct *tsk; |
693 | unsigned long flags; | |
694 | int sig; | |
695 | ||
696 | flags = oops_begin(); | |
697 | tsk = current; | |
698 | sig = SIGKILL; | |
1209140c | 699 | |
1da177e4 | 700 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
92181f19 | 701 | tsk->comm, address); |
1da177e4 | 702 | dump_pagetable(address); |
2d4a7167 IM |
703 | |
704 | tsk->thread.cr2 = address; | |
51e7dc70 | 705 | tsk->thread.trap_nr = X86_TRAP_PF; |
2d4a7167 IM |
706 | tsk->thread.error_code = error_code; |
707 | ||
22f5991c | 708 | if (__die("Bad pagetable", regs, error_code)) |
874d93d1 | 709 | sig = 0; |
2d4a7167 | 710 | |
874d93d1 | 711 | oops_end(flags, regs, sig); |
1da177e4 LT |
712 | } |
713 | ||
2d4a7167 IM |
714 | static noinline void |
715 | no_context(struct pt_regs *regs, unsigned long error_code, | |
4fc34901 | 716 | unsigned long address, int signal, int si_code) |
92181f19 NP |
717 | { |
718 | struct task_struct *tsk = current; | |
92181f19 NP |
719 | unsigned long flags; |
720 | int sig; | |
7b2d0dba DH |
721 | /* No context means no VMA to pass down */ |
722 | struct vm_area_struct *vma = NULL; | |
92181f19 | 723 | |
2d4a7167 | 724 | /* Are we prepared to handle this kernel fault? */ |
548acf19 | 725 | if (fixup_exception(regs, X86_TRAP_PF)) { |
c026b359 PZ |
726 | /* |
727 | * Any interrupt that takes a fault gets the fixup. This makes | |
728 | * the below recursive fault logic only apply to a faults from | |
729 | * task context. | |
730 | */ | |
731 | if (in_interrupt()) | |
732 | return; | |
733 | ||
734 | /* | |
735 | * Per the above we're !in_interrupt(), aka. task context. | |
736 | * | |
737 | * In this case we need to make sure we're not recursively | |
738 | * faulting through the emulate_vsyscall() logic. | |
739 | */ | |
2a53ccbc | 740 | if (current->thread.sig_on_uaccess_err && signal) { |
51e7dc70 | 741 | tsk->thread.trap_nr = X86_TRAP_PF; |
4fc34901 AL |
742 | tsk->thread.error_code = error_code | PF_USER; |
743 | tsk->thread.cr2 = address; | |
744 | ||
745 | /* XXX: hwpoison faults will set the wrong code. */ | |
7b2d0dba DH |
746 | force_sig_info_fault(signal, si_code, address, |
747 | tsk, vma, 0); | |
4fc34901 | 748 | } |
c026b359 PZ |
749 | |
750 | /* | |
751 | * Barring that, we can do the fixup and be happy. | |
752 | */ | |
92181f19 | 753 | return; |
4fc34901 | 754 | } |
92181f19 | 755 | |
6271cfdf AL |
756 | #ifdef CONFIG_VMAP_STACK |
757 | /* | |
758 | * Stack overflow? During boot, we can fault near the initial | |
759 | * stack in the direct map, but that's not an overflow -- check | |
760 | * that we're in vmalloc space to avoid this. | |
761 | */ | |
762 | if (is_vmalloc_addr((void *)address) && | |
763 | (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || | |
764 | address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { | |
765 | register void *__sp asm("rsp"); | |
766 | unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); | |
767 | /* | |
768 | * We're likely to be running with very little stack space | |
769 | * left. It's plausible that we'd hit this condition but | |
770 | * double-fault even before we get this far, in which case | |
771 | * we're fine: the double-fault handler will deal with it. | |
772 | * | |
773 | * We don't want to make it all the way into the oops code | |
774 | * and then double-fault, though, because we're likely to | |
775 | * break the console driver and lose most of the stack dump. | |
776 | */ | |
777 | asm volatile ("movq %[stack], %%rsp\n\t" | |
778 | "call handle_stack_overflow\n\t" | |
779 | "1: jmp 1b" | |
780 | : "+r" (__sp) | |
781 | : "D" ("kernel stack overflow (page fault)"), | |
782 | "S" (regs), "d" (address), | |
783 | [stack] "rm" (stack)); | |
784 | unreachable(); | |
785 | } | |
786 | #endif | |
787 | ||
92181f19 | 788 | /* |
2d4a7167 IM |
789 | * 32-bit: |
790 | * | |
791 | * Valid to do another page fault here, because if this fault | |
792 | * had been triggered by is_prefetch fixup_exception would have | |
793 | * handled it. | |
794 | * | |
795 | * 64-bit: | |
92181f19 | 796 | * |
2d4a7167 | 797 | * Hall of shame of CPU/BIOS bugs. |
92181f19 NP |
798 | */ |
799 | if (is_prefetch(regs, error_code, address)) | |
800 | return; | |
801 | ||
802 | if (is_errata93(regs, address)) | |
803 | return; | |
804 | ||
805 | /* | |
806 | * Oops. The kernel tried to access some bad page. We'll have to | |
2d4a7167 | 807 | * terminate things with extreme prejudice: |
92181f19 | 808 | */ |
92181f19 | 809 | flags = oops_begin(); |
92181f19 NP |
810 | |
811 | show_fault_oops(regs, error_code, address); | |
812 | ||
a70857e4 | 813 | if (task_stack_end_corrupted(tsk)) |
b0f4c4b3 | 814 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); |
19803078 | 815 | |
1cc99544 | 816 | tsk->thread.cr2 = address; |
51e7dc70 | 817 | tsk->thread.trap_nr = X86_TRAP_PF; |
1cc99544 | 818 | tsk->thread.error_code = error_code; |
92181f19 | 819 | |
92181f19 NP |
820 | sig = SIGKILL; |
821 | if (__die("Oops", regs, error_code)) | |
822 | sig = 0; | |
2d4a7167 | 823 | |
92181f19 | 824 | /* Executive summary in case the body of the oops scrolled away */ |
b0f4c4b3 | 825 | printk(KERN_DEFAULT "CR2: %016lx\n", address); |
2d4a7167 | 826 | |
92181f19 | 827 | oops_end(flags, regs, sig); |
92181f19 NP |
828 | } |

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, struct vm_area_struct *vma,
		       int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		/*
		 * To avoid leaking information about the kernel page table
		 * layout, pretend that user-mode accesses to kernel addresses
		 * are always protection faults.
		 */
		if (address >= TASK_SIZE_MAX)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}
915 | ||
2d4a7167 IM |
916 | static noinline void |
917 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |
7b2d0dba | 918 | unsigned long address, struct vm_area_struct *vma) |
92181f19 | 919 | { |
7b2d0dba | 920 | __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); |
92181f19 NP |
921 | } |
922 | ||
2d4a7167 IM |
923 | static void |
924 | __bad_area(struct pt_regs *regs, unsigned long error_code, | |
7b2d0dba | 925 | unsigned long address, struct vm_area_struct *vma, int si_code) |
92181f19 NP |
926 | { |
927 | struct mm_struct *mm = current->mm; | |
928 | ||
929 | /* | |
930 | * Something tried to access memory that isn't in our memory map.. | |
931 | * Fix it, but check if it's kernel or user first.. | |
932 | */ | |
933 | up_read(&mm->mmap_sem); | |
934 | ||
7b2d0dba | 935 | __bad_area_nosemaphore(regs, error_code, address, vma, si_code); |
92181f19 NP |
936 | } |
937 | ||
2d4a7167 IM |
938 | static noinline void |
939 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) | |
92181f19 | 940 | { |
7b2d0dba | 941 | __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); |
92181f19 NP |
942 | } |
943 | ||
33a709b2 DH |
944 | static inline bool bad_area_access_from_pkeys(unsigned long error_code, |
945 | struct vm_area_struct *vma) | |
946 | { | |
07f146f5 DH |
947 | /* This code is always called on the current mm */ |
948 | bool foreign = false; | |
949 | ||
33a709b2 DH |
950 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
951 | return false; | |
952 | if (error_code & PF_PK) | |
953 | return true; | |
07f146f5 | 954 | /* this checks permission keys on the VMA: */ |
d61172b4 DH |
955 | if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), |
956 | (error_code & PF_INSTR), foreign)) | |
07f146f5 | 957 | return true; |
33a709b2 | 958 | return false; |
92181f19 NP |
959 | } |
960 | ||
2d4a7167 IM |
961 | static noinline void |
962 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, | |
7b2d0dba | 963 | unsigned long address, struct vm_area_struct *vma) |
92181f19 | 964 | { |
019132ff DH |
965 | /* |
966 | * This OSPKE check is not strictly necessary at runtime. | |
967 | * But, doing it this way allows compiler optimizations | |
968 | * if pkeys are compiled out. | |
969 | */ | |
33a709b2 | 970 | if (bad_area_access_from_pkeys(error_code, vma)) |
019132ff DH |
971 | __bad_area(regs, error_code, address, vma, SEGV_PKUERR); |
972 | else | |
973 | __bad_area(regs, error_code, address, vma, SEGV_ACCERR); | |
92181f19 NP |
974 | } |
975 | ||
2d4a7167 | 976 | static void |
a6e04aa9 | 977 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, |
7b2d0dba | 978 | struct vm_area_struct *vma, unsigned int fault) |
92181f19 NP |
979 | { |
980 | struct task_struct *tsk = current; | |
a6e04aa9 | 981 | int code = BUS_ADRERR; |
92181f19 | 982 | |
2d4a7167 | 983 | /* Kernel mode? Handle exceptions or die: */ |
96054569 | 984 | if (!(error_code & PF_USER)) { |
4fc34901 | 985 | no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); |
96054569 LT |
986 | return; |
987 | } | |
2d4a7167 | 988 | |
cd1b68f0 | 989 | /* User-space => ok to do another page fault: */ |
92181f19 NP |
990 | if (is_prefetch(regs, error_code, address)) |
991 | return; | |
2d4a7167 IM |
992 | |
993 | tsk->thread.cr2 = address; | |
994 | tsk->thread.error_code = error_code; | |
51e7dc70 | 995 | tsk->thread.trap_nr = X86_TRAP_PF; |
2d4a7167 | 996 | |
a6e04aa9 | 997 | #ifdef CONFIG_MEMORY_FAILURE |
f672b49b | 998 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { |
a6e04aa9 AK |
999 | printk(KERN_ERR |
1000 | "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", | |
1001 | tsk->comm, tsk->pid, address); | |
1002 | code = BUS_MCEERR_AR; | |
1003 | } | |
1004 | #endif | |
7b2d0dba | 1005 | force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); |
92181f19 NP |
1006 | } |
1007 | ||
3a13c4d7 | 1008 | static noinline void |
2d4a7167 | 1009 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
7b2d0dba DH |
1010 | unsigned long address, struct vm_area_struct *vma, |
1011 | unsigned int fault) | |
92181f19 | 1012 | { |
3a13c4d7 | 1013 | if (fatal_signal_pending(current) && !(error_code & PF_USER)) { |
3a13c4d7 JW |
1014 | no_context(regs, error_code, address, 0, 0); |
1015 | return; | |
b80ef10e | 1016 | } |
b80ef10e | 1017 | |
2d4a7167 | 1018 | if (fault & VM_FAULT_OOM) { |
f8626854 AV |
1019 | /* Kernel mode? Handle exceptions or die: */ |
1020 | if (!(error_code & PF_USER)) { | |
4fc34901 AL |
1021 | no_context(regs, error_code, address, |
1022 | SIGSEGV, SEGV_MAPERR); | |
3a13c4d7 | 1023 | return; |
f8626854 AV |
1024 | } |
1025 | ||
c2d23f91 DR |
1026 | /* |
1027 | * We ran out of memory, call the OOM killer, and return the | |
1028 | * userspace (which will retry the fault, or kill us if we got | |
1029 | * oom-killed): | |
1030 | */ | |
1031 | pagefault_out_of_memory(); | |
2d4a7167 | 1032 | } else { |
f672b49b AK |
1033 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
1034 | VM_FAULT_HWPOISON_LARGE)) | |
7b2d0dba | 1035 | do_sigbus(regs, error_code, address, vma, fault); |
33692f27 | 1036 | else if (fault & VM_FAULT_SIGSEGV) |
7b2d0dba | 1037 | bad_area_nosemaphore(regs, error_code, address, vma); |
2d4a7167 IM |
1038 | else |
1039 | BUG(); | |
1040 | } | |
92181f19 NP |
1041 | } |
1042 | ||
d8b57bb7 TG |
1043 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
1044 | { | |
1045 | if ((error_code & PF_WRITE) && !pte_write(*pte)) | |
1046 | return 0; | |
2d4a7167 | 1047 | |
d8b57bb7 TG |
1048 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) |
1049 | return 0; | |
b3ecd515 DH |
1050 | /* |
1051 | * Note: We do not do lazy flushing on protection key | |
1052 | * changes, so no spurious fault will ever set PF_PK. | |
1053 | */ | |
1054 | if ((error_code & PF_PK)) | |
1055 | return 1; | |
d8b57bb7 TG |
1056 | |
1057 | return 1; | |
1058 | } | |
1059 | ||
5b727a3b | 1060 | /* |
2d4a7167 IM |
1061 | * Handle a spurious fault caused by a stale TLB entry. |
1062 | * | |
1063 | * This allows us to lazily refresh the TLB when increasing the | |
1064 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it | |
1065 | * eagerly is very expensive since that implies doing a full | |
1066 | * cross-processor TLB flush, even if no stale TLB entries exist | |
1067 | * on other processors. | |
1068 | * | |
31668511 DV |
1069 | * Spurious faults may only occur if the TLB contains an entry with |
1070 | * fewer permission than the page table entry. Non-present (P = 0) | |
1071 | * and reserved bit (R = 1) faults are never spurious. | |
1072 | * | |
5b727a3b JF |
1073 | * There are no security implications to leaving a stale TLB when |
1074 | * increasing the permissions on a page. | |
31668511 DV |
1075 | * |
1076 | * Returns non-zero if a spurious fault was handled, zero otherwise. | |
1077 | * | |
1078 | * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 | |
1079 | * (Optional Invalidation). | |
5b727a3b | 1080 | */ |
9326638c | 1081 | static noinline int |
2d4a7167 | 1082 | spurious_fault(unsigned long error_code, unsigned long address) |
5b727a3b JF |
1083 | { |
1084 | pgd_t *pgd; | |
1085 | pud_t *pud; | |
1086 | pmd_t *pmd; | |
1087 | pte_t *pte; | |
3c3e5694 | 1088 | int ret; |
5b727a3b | 1089 | |
31668511 DV |
1090 | /* |
1091 | * Only writes to RO or instruction fetches from NX may cause | |
1092 | * spurious faults. | |
1093 | * | |
1094 | * These could be from user or supervisor accesses but the TLB | |
1095 | * is only lazily flushed after a kernel mapping protection | |
1096 | * change, so user accesses are not expected to cause spurious | |
1097 | * faults. | |
1098 | */ | |
1099 | if (error_code != (PF_WRITE | PF_PROT) | |
1100 | && error_code != (PF_INSTR | PF_PROT)) | |
5b727a3b JF |
1101 | return 0; |
1102 | ||
1103 | pgd = init_mm.pgd + pgd_index(address); | |
1104 | if (!pgd_present(*pgd)) | |
1105 | return 0; | |
1106 | ||
1107 | pud = pud_offset(pgd, address); | |
1108 | if (!pud_present(*pud)) | |
1109 | return 0; | |
1110 | ||
d8b57bb7 TG |
1111 | if (pud_large(*pud)) |
1112 | return spurious_fault_check(error_code, (pte_t *) pud); | |
1113 | ||
5b727a3b JF |
1114 | pmd = pmd_offset(pud, address); |
1115 | if (!pmd_present(*pmd)) | |
1116 | return 0; | |
1117 | ||
d8b57bb7 TG |
1118 | if (pmd_large(*pmd)) |
1119 | return spurious_fault_check(error_code, (pte_t *) pmd); | |
1120 | ||
5b727a3b | 1121 | pte = pte_offset_kernel(pmd, address); |
954f8571 | 1122 | if (!pte_present(*pte)) |
5b727a3b JF |
1123 | return 0; |
1124 | ||
3c3e5694 SR |
1125 | ret = spurious_fault_check(error_code, pte); |
1126 | if (!ret) | |
1127 | return 0; | |
1128 | ||
1129 | /* | |
2d4a7167 IM |
1130 | * Make sure we have permissions in PMD. |
1131 | * If not, then there's a bug in the page tables: | |
3c3e5694 SR |
1132 | */ |
1133 | ret = spurious_fault_check(error_code, (pte_t *) pmd); | |
1134 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); | |
2d4a7167 | 1135 | |
3c3e5694 | 1136 | return ret; |
5b727a3b | 1137 | } |
9326638c | 1138 | NOKPROBE_SYMBOL(spurious_fault); |
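
/*
 * Example of the lazy-flush scenario this handles: set_memory_rw()
 * flips a kernel page from RO to RW without a cross-CPU TLB flush.
 * A CPU still holding the stale RO translation takes a single fault
 * with error_code == (PF_PROT | PF_WRITE); spurious_fault() sees
 * pte_write() set in the current page tables and returns 1, the
 * stale entry is invalidated by the fault itself, and the write is
 * simply retried.
 */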

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/* This is only called for the current mm, so: */
	bool foreign = false;

	/*
	 * Read or write was blocked by protection keys.  This is
	 * always an unconditional error and can never result in
	 * a follow-up action to resolve the fault, like a COW.
	 */
	if (error_code & PF_PK)
		return 1;

	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a PF_PK as soon as we fill in a
	 * page.
	 */
	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
				       (error_code & PF_INSTR), foreign))
		return 1;

	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
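
/*
 * Two examples of the decision above: a write fault on a page of a
 * private PROT_READ|PROT_WRITE mapping passes (VM_WRITE is set) and
 * goes on to populate or COW the page, while a read fault with
 * PF_PROT set (present page, permissions violated, e.g. a user-mode
 * read of a supervisor-only page) can never be fixed up and is
 * always an error.
 */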
1182 | ||
0973a06c HS |
1183 | static int fault_in_kernel_space(unsigned long address) |
1184 | { | |
d9517346 | 1185 | return address >= TASK_SIZE_MAX; |
0973a06c HS |
1186 | } |
1187 | ||
40d3cd66 PA |
1188 | static inline bool smap_violation(int error_code, struct pt_regs *regs) |
1189 | { | |
4640c7ee PA |
1190 | if (!IS_ENABLED(CONFIG_X86_SMAP)) |
1191 | return false; | |
1192 | ||
1193 | if (!static_cpu_has(X86_FEATURE_SMAP)) | |
1194 | return false; | |
1195 | ||
40d3cd66 PA |
1196 | if (error_code & PF_USER) |
1197 | return false; | |
1198 | ||
f39b6f0e | 1199 | if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) |
40d3cd66 PA |
1200 | return false; |
1201 | ||
1202 | return true; | |
1203 | } | |
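
/*
 * In other words: a kernel-mode access to a user address is an SMAP
 * violation unless EFLAGS.AC was set, i.e. unless the access was
 * bracketed by stac()/clac() the way the uaccess routines such as
 * copy_from_user() do it.
 */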
1204 | ||
1da177e4 LT |
1205 | /* |
1206 | * This routine handles page faults. It determines the address, | |
1207 | * and the problem, and then passes it off to one of the appropriate | |
1208 | * routines. | |
d4078e23 PZ |
1209 | * |
1210 | * This function must have noinline because both callers | |
1211 | * {,trace_}do_page_fault() have notrace on. Having this an actual function | |
1212 | * guarantees there's a function trace entry. | |
1da177e4 | 1213 | */ |
9326638c | 1214 | static noinline void |
0ac09f9f JO |
1215 | __do_page_fault(struct pt_regs *regs, unsigned long error_code, |
1216 | unsigned long address) | |
1da177e4 | 1217 | { |
2d4a7167 | 1218 | struct vm_area_struct *vma; |
1da177e4 LT |
1219 | struct task_struct *tsk; |
1220 | struct mm_struct *mm; | |
26178ec1 | 1221 | int fault, major = 0; |
759496ba | 1222 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
1da177e4 | 1223 | |
a9ba9a3b AV |
1224 | tsk = current; |
1225 | mm = tsk->mm; | |
2d4a7167 | 1226 | |
f8561296 VN |
1227 | /* |
1228 | * Detect and handle instructions that would cause a page fault for | |
1229 | * both a tracked kernel page and a userspace page. | |
1230 | */ | |
1231 | if (kmemcheck_active(regs)) | |
1232 | kmemcheck_hide(regs); | |
5dfaf90f | 1233 | prefetchw(&mm->mmap_sem); |
f8561296 | 1234 | |
0fd0e3da | 1235 | if (unlikely(kmmio_fault(regs, address))) |
86069782 | 1236 | return; |
1da177e4 LT |
1237 | |
1238 | /* | |
1239 | * We fault-in kernel-space virtual memory on-demand. The | |
1240 | * 'reference' page table is init_mm.pgd. | |
1241 | * | |
1242 | * NOTE! We MUST NOT take any locks for this case. We may | |
1243 | * be in an interrupt or a critical region, and should | |
1244 | * only copy the information from the master page table, | |
1245 | * nothing more. | |
1246 | * | |
1247 | * This verifies that the fault happens in kernel space | |
1248 | * (error_code & 4) == 0, and that the fault was not a | |
8b1bde93 | 1249 | * protection error (error_code & 9) == 0. |
1da177e4 | 1250 | */ |
0973a06c | 1251 | if (unlikely(fault_in_kernel_space(address))) { |
f8561296 VN |
1252 | if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { |
1253 | if (vmalloc_fault(address) >= 0) | |
1254 | return; | |
1255 | ||
1256 | if (kmemcheck_fault(regs, address, error_code)) | |
1257 | return; | |
1258 | } | |
5b727a3b | 1259 | |
2d4a7167 | 1260 | /* Can handle a stale RO->RW TLB: */ |
92181f19 | 1261 | if (spurious_fault(error_code, address)) |
5b727a3b JF |
1262 | return; |
1263 | ||
2d4a7167 | 1264 | /* kprobes don't want to hook the spurious faults: */ |
e00b12e6 | 1265 | if (kprobes_fault(regs)) |
9be260a6 | 1266 | return; |
f8c2ee22 HH |
1267 | /* |
1268 | * Don't take the mm semaphore here. If we fixup a prefetch | |
2d4a7167 | 1269 | * fault we could otherwise deadlock: |
f8c2ee22 | 1270 | */ |
7b2d0dba | 1271 | bad_area_nosemaphore(regs, error_code, address, NULL); |
2d4a7167 | 1272 | |
92181f19 | 1273 | return; |
f8c2ee22 HH |
1274 | } |
1275 | ||
2d4a7167 | 1276 | /* kprobes don't want to hook the spurious faults: */ |
e00b12e6 | 1277 | if (unlikely(kprobes_fault(regs))) |
9be260a6 | 1278 | return; |
8c914cb7 | 1279 | |
66c58156 | 1280 | if (unlikely(error_code & PF_RSVD)) |
92181f19 | 1281 | pgtable_bad(regs, error_code, address); |
1da177e4 | 1282 | |
4640c7ee | 1283 | if (unlikely(smap_violation(error_code, regs))) { |
7b2d0dba | 1284 | bad_area_nosemaphore(regs, error_code, address, NULL); |
4640c7ee | 1285 | return; |
40d3cd66 PA |
1286 | } |
1287 | ||
1da177e4 | 1288 | /* |
2d4a7167 | 1289 | * If we're in an interrupt, have no user context or are running |
70ffdb93 | 1290 | * in a region with pagefaults disabled then we must not take the fault |
1da177e4 | 1291 | */ |
70ffdb93 | 1292 | if (unlikely(faulthandler_disabled() || !mm)) { |
7b2d0dba | 1293 | bad_area_nosemaphore(regs, error_code, address, NULL); |
92181f19 NP |
1294 | return; |
1295 | } | |
1da177e4 | 1296 | |
e00b12e6 PZ |
1297 | /* |
1298 | * It's safe to allow irq's after cr2 has been saved and the | |
1299 | * vmalloc fault has been handled. | |
1300 | * | |
1301 | * User-mode registers count as a user access even for any | |
1302 | * potential system fault or CPU buglet: | |
1303 | */ | |
f39b6f0e | 1304 | if (user_mode(regs)) { |
e00b12e6 PZ |
1305 | local_irq_enable(); |
1306 | error_code |= PF_USER; | |
1307 | flags |= FAULT_FLAG_USER; | |
1308 | } else { | |
1309 | if (regs->flags & X86_EFLAGS_IF) | |
1310 | local_irq_enable(); | |
1311 | } | |
1312 | ||
1313 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
1314 | ||
759496ba JW |
1315 | if (error_code & PF_WRITE) |
1316 | flags |= FAULT_FLAG_WRITE; | |
d61172b4 DH |
1317 | if (error_code & PF_INSTR) |
1318 | flags |= FAULT_FLAG_INSTRUCTION; | |
759496ba | 1319 | |
3a1dfe6e IM |
1320 | /* |
1321 | * When running in the kernel we expect faults to occur only to | |
2d4a7167 IM |
1322 | * addresses in user space. All other faults represent errors in |
1323 | * the kernel and should generate an OOPS. Unfortunately, in the | |
1324 | * case of an erroneous fault occurring in a code path which already | |
1325 | * holds mmap_sem we will deadlock attempting to validate the fault | |
1326 | * against the address space. Luckily the kernel only validly | |
1327 | * references user space from well defined areas of code, which are | |
1328 | * listed in the exceptions table. | |
1da177e4 LT |
1329 | * |
1330 | * As the vast majority of faults will be valid we will only perform | |
2d4a7167 IM |
1331 | * the source reference check when there is a possibility of a |
1332 | * deadlock. Attempt to lock the address space, if we cannot we then | |
1333 | * validate the source. If this is invalid we can skip the address | |
1334 | * space check, thus avoiding the deadlock: | |
1da177e4 | 1335 | */ |
92181f19 | 1336 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
66c58156 | 1337 | if ((error_code & PF_USER) == 0 && |
92181f19 | 1338 | !search_exception_tables(regs->ip)) { |
7b2d0dba | 1339 | bad_area_nosemaphore(regs, error_code, address, NULL); |
92181f19 NP |
1340 | return; |
1341 | } | |
d065bd81 | 1342 | retry: |
1da177e4 | 1343 | down_read(&mm->mmap_sem); |
01006074 PZ |
1344 | } else { |
1345 | /* | |
2d4a7167 IM |
1346 | * The above down_read_trylock() might have succeeded in |
1347 | * which case we'll have missed the might_sleep() from | |
1348 | * down_read(): | |
01006074 PZ |
1349 | */ |
1350 | might_sleep(); | |
1da177e4 LT |
1351 | } |
1352 | ||
1353 | vma = find_vma(mm, address); | |
92181f19 NP |
1354 | if (unlikely(!vma)) { |
1355 | bad_area(regs, error_code, address); | |
1356 | return; | |
1357 | } | |
1358 | if (likely(vma->vm_start <= address)) | |
1da177e4 | 1359 | goto good_area; |
92181f19 NP |
1360 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
1361 | bad_area(regs, error_code, address); | |
1362 | return; | |
1363 | } | |
33cb5243 | 1364 | if (error_code & PF_USER) { |
6f4d368e HH |
1365 | /* |
1366 | * Accessing the stack below %sp is always a bug. | |
1367 | * The large cushion allows instructions like enter | |
2d4a7167 | 1368 | * and pusha to work. ("enter $65535, $31" pushes |
6f4d368e | 1369 | * 32 pointers and then decrements %sp by 65535.) |
03fdc2c2 | 1370 | */ |
92181f19 NP |
1371 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
1372 | bad_area(regs, error_code, address); | |
1373 | return; | |
1374 | } | |
1da177e4 | 1375 | } |
92181f19 NP |
1376 | if (unlikely(expand_stack(vma, address))) { |
1377 | bad_area(regs, error_code, address); | |
1378 | return; | |
1379 | } | |
1380 | ||
1381 | /* | |
1382 | * Ok, we have a good vm_area for this memory access, so | |
1383 | * we can handle it.. | |
1384 | */ | |
1da177e4 | 1385 | good_area: |
68da336a | 1386 | if (unlikely(access_error(error_code, vma))) { |
7b2d0dba | 1387 | bad_area_access_error(regs, error_code, address, vma); |
92181f19 | 1388 | return; |
1da177e4 LT |
1389 | } |
1390 | ||
1391 | /* | |
1392 | * If for any reason at all we couldn't handle the fault, | |
1393 | * make sure we exit gracefully rather than endlessly redo | |
9a95f3cf PC |
1394 | * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if |
1395 | * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. | |
1da177e4 | 1396 | */ |
dcddffd4 | 1397 | fault = handle_mm_fault(vma, address, flags); |
26178ec1 | 1398 | major |= fault & VM_FAULT_MAJOR; |
2d4a7167 | 1399 | |
3a13c4d7 | 1400 | /* |
26178ec1 LT |
1401 | * If we need to retry the mmap_sem has already been released, |
1402 | * and if there is a fatal signal pending there is no guarantee | |
1403 | * that we made any progress. Handle this case first. | |
3a13c4d7 | 1404 | */ |
26178ec1 LT |
1405 | if (unlikely(fault & VM_FAULT_RETRY)) { |
1406 | /* Retry at most once */ | |
1407 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | |
1408 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | |
1409 | flags |= FAULT_FLAG_TRIED; | |
1410 | if (!fatal_signal_pending(tsk)) | |
1411 | goto retry; | |
1412 | } | |
1413 | ||
1414 | /* User mode? Just return to handle the fatal exception */ | |
cf3c0a15 | 1415 | if (flags & FAULT_FLAG_USER) |
26178ec1 LT |
1416 | return; |
1417 | ||
1418 | /* Not returning to user mode? Handle exceptions or die: */ | |
1419 | no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); | |
3a13c4d7 | 1420 | return; |
26178ec1 | 1421 | } |
3a13c4d7 | 1422 | |
26178ec1 | 1423 | up_read(&mm->mmap_sem); |
3a13c4d7 | 1424 | if (unlikely(fault & VM_FAULT_ERROR)) { |
7b2d0dba | 1425 | mm_fault_error(regs, error_code, address, vma, fault); |
3a13c4d7 | 1426 | return; |
37b23e05 KM |
1427 | } |
1428 | ||
d065bd81 | 1429 | /* |
26178ec1 LT |
1430 | * Major/minor page fault accounting. If any of the events |
1431 | * returned VM_FAULT_MAJOR, we account it as a major fault. | |
d065bd81 | 1432 | */ |
26178ec1 LT |
1433 | if (major) { |
1434 | tsk->maj_flt++; | |
1435 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | |
1436 | } else { | |
1437 | tsk->min_flt++; | |
1438 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | |
ac17dc8e | 1439 | } |
d729ab35 | 1440 | |
8c938f9f | 1441 | check_v8086_mode(regs, address, tsk); |
1da177e4 | 1442 | } |
9326638c | 1443 | NOKPROBE_SYMBOL(__do_page_fault); |
6ba3c97a | 1444 | |
dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * We must have this function tagged with __kprobes, notrace and call
	 * read_cr2() before calling anything else, to avoid calling any kind
	 * of tracing machinery before we've observed the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */

	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger another page fault (user space callchain
	 * reading) and destroy the original cr2 value, so read
	 * the faulting address now.
	 */
	unsigned long address = read_cr2();
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */