/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>               /* test_thread_flag(), ...     */
#include <linux/sched/task_stack.h>    /* task_stack_*(), ...         */
#include <linux/kdebug.h>              /* oops_begin/end, ...         */
#include <linux/extable.h>             /* search_exception_tables     */
#include <linux/bootmem.h>             /* max_low_pfn                 */
#include <linux/kprobes.h>             /* NOKPROBE_SYMBOL, ...        */
#include <linux/mmiotrace.h>           /* kmmio_handler, ...          */
#include <linux/perf_event.h>          /* perf_sw_event               */
#include <linux/hugetlb.h>             /* hstate_index_to_shift       */
#include <linux/prefetch.h>            /* prefetchw                   */
#include <linux/context_tracking.h>    /* exception_enter(), ...      */
#include <linux/uaccess.h>             /* faulthandler_disabled()     */

#include <asm/cpufeature.h>            /* boot_cpu_has, ...           */
#include <asm/traps.h>                 /* dotraplinkage, ...          */
#include <asm/pgalloc.h>               /* pgd_*(), ...                */
#include <asm/kmemcheck.h>             /* kmemcheck_*(), ...          */
#include <asm/fixmap.h>                /* VSYSCALL_ADDR               */
#include <asm/vsyscall.h>              /* emulate_vsyscall            */
#include <asm/vm86.h>                  /* struct vm86                 */
#include <asm/mmu_context.h>           /* vma_pkey()                  */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==  0: no page found        1: protection fault
 *   bit 1 ==  0: read access          1: write access
 *   bit 2 ==  0: kernel-mode access   1: user-mode access
 *   bit 3 ==                          1: use of reserved bit detected
 *   bit 4 ==                          1: fault was an instruction fetch
 *   bit 5 ==                          1: protection keys block access
 */
enum x86_pf_error_code {

        PF_PROT         =       1 << 0,
        PF_WRITE        =       1 << 1,
        PF_USER         =       1 << 2,
        PF_RSVD         =       1 << 3,
        PF_INSTR        =       1 << 4,
        PF_PK           =       1 << 5,
};
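
/*
 * Worked example of how the bits combine: a user-mode write to a
 * present page that denies write access arrives as
 * PF_PROT | PF_WRITE | PF_USER (== 0x7), while a user-mode read of
 * an unmapped address arrives as just PF_USER (== 0x4).
 */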

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
        return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                      unsigned char opcode, int *prefetch)
{
        unsigned char instr_hi = opcode & 0xf0;
        unsigned char instr_lo = opcode & 0x0f;

        switch (instr_hi) {
        case 0x20:
        case 0x30:
                /*
                 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
                 * In X86_64 long mode, the CPU will signal invalid
                 * opcode if some of these prefixes are present so
                 * X86_64 will never get here anyway
                 */
                return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
        case 0x40:
                /*
                 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
                 * Need to figure out under what instruction mode the
                 * instruction was issued. Could check the LDT for lm,
                 * but for now it's good enough to assume that long
                 * mode only uses well known segments or kernel.
                 */
                return (!user_mode(regs) || user_64bit_mode(regs));
#endif
        case 0x60:
                /* 0x64 thru 0x67 are valid prefixes in all modes. */
                return (instr_lo & 0xC) == 0x4;
        case 0xF0:
                /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
                return !instr_lo || (instr_lo>>1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
                if (probe_kernel_address(instr, opcode))
                        return 0;

                *prefetch = (instr_lo == 0xF) &&
                        (opcode == 0x0D || opcode == 0x18);
                return 0;
        default:
                return 0;
        }
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
        unsigned char *max_instr;
        unsigned char *instr;
        int prefetch = 0;

        /*
         * If it was an exec (instruction fetch) fault on an NX page, then
         * do not ignore the fault:
         */
        if (error_code & PF_INSTR)
                return 0;

        instr = (void *)convert_ip_to_linear(current, regs);
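        /* No x86 instruction is longer than 15 bytes: */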
        max_instr = instr + 15;

        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
                return 0;

        while (instr < max_instr) {
                unsigned char opcode;

                if (probe_kernel_address(instr, opcode))
                        break;

                instr++;

                if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
                        break;
        }
        return prefetch;
}

/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE. Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler. It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *           faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_code, siginfo_t *info,
                struct vm_area_struct *vma)
{
        /* This is effectively an #ifdef */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return;

        /* Fault not from Protection Keys: nothing to do */
        if (si_code != SEGV_PKUERR)
                return;
        /*
         * force_sig_info_fault() is called from a number of
         * contexts, some of which have a VMA and some of which
         * do not. The PF_PK handling happens after we have a
         * valid VMA, so we should never reach this without a
         * valid VMA.
         */
        if (!vma) {
                WARN_ONCE(1, "PKU fault with no VMA passed in");
                info->si_pkey = 0;
                return;
        }
        /*
         * si_pkey should be thought of as a strong hint, but not
         * absolutely guaranteed to be 100% accurate because of
         * the race explained above.
         */
        info->si_pkey = vma_pkey(vma);
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk, struct vm_area_struct *vma,
                     int fault)
{
        unsigned lsb = 0;
        siginfo_t info;

        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;
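        /*
         * For a hwpoison SIGBUS, si_addr_lsb tells userspace how coarse
         * the poisoned region is: a base page or a whole huge page.
         */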
        if (fault & VM_FAULT_HWPOISON_LARGE)
                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
                lsb = PAGE_SHIFT;
        info.si_addr_lsb = lsb;

        fill_sig_info_pkey(si_code, &info, vma);

        force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        /*
         * set_pgd(pgd, *pgd_k); here would be useless on PAE
         * and redundant with the set_pmd() on non-PAE. As would
         * set_p4d/set_pud.
         */
        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (!p4d_present(*p4d_k))
                return NULL;

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

        return pmd_k;
}

void vmalloc_sync_all(void)
{
        unsigned long address;

        if (SHARED_KERNEL_PMD)
                return;

        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
             address += PMD_SIZE) {
                struct page *page;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
                        pmd_t *ret;

                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

                        spin_lock(pgt_lock);
                        ret = vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);

                        if (!ret)
                                break;
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        unsigned long pgd_paddr;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_paddr = read_cr3();
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;

        if (pmd_huge(*pmd_k))
                return 0;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
        unsigned long bit;

        if (!v8086_mode(regs) || !tsk->thread.vm86)
                return;

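        /* The legacy VGA/DOS screen memory window starts at 0xA0000: */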
        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
                tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
        return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(address)];
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

#ifdef CONFIG_X86_PAE
        printk("*pdpt = %016Lx ", pgd_val(*pgd));
        if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
                goto out;
#endif
        p4d = p4d_offset(pgd, address);
        pud = pud_offset(p4d, address);
        pmd = pmd_offset(pud, address);
        printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

        /*
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
        printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd, *pgd_ref;
        pud_t *pud, *pud_ref;
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the latter
         * case just flush:
         */
        pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;

        if (pgd_none(*pgd)) {
                set_pgd(pgd, *pgd_ref);
                arch_flush_lazy_mmu_mode();
        } else {
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
        }

        /*
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */

        pud = pud_offset(pgd, address);
        pud_ref = pud_offset(pgd_ref, address);
        if (pud_none(*pud_ref))
                return -1;

        if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
                BUG();

        if (pud_huge(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;

        if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
                BUG();

        if (pmd_huge(*pmd))
                return 0;

        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;

        pte = pte_offset_kernel(pmd, address);

        /*
         * Don't use pte_page here, because the mappings can point
         * outside mem_map, and the NUMA hash lookup cannot handle
         * that:
         */
        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                BUG();

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
        pgd_t *pgd = base + pgd_index(address);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (bad_address(pgd))
                goto bad;

        printk("PGD %lx ", pgd_val(*pgd));

        if (!pgd_present(*pgd))
                goto out;

        p4d = p4d_offset(pgd, address);
        if (bad_address(p4d))
                goto bad;

        printk("P4D %lx ", p4d_val(*p4d));
        if (!p4d_present(*p4d) || p4d_large(*p4d))
                goto out;

        pud = pud_offset(p4d, address);
        if (bad_address(pud))
                goto bad;

        printk("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud) || pud_large(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (bad_address(pmd))
                goto bad;

        printk("PMD %lx ", pmd_val(*pmd));
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        if (bad_address(pte))
                goto bad;

        printk("PTE %lx", pte_val(*pte));
out:
        printk("\n");
        return;
bad:
        printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
            || boot_cpu_data.x86 != 0xf)
                return 0;

        if (address != regs->ip)
                return 0;

        if ((address >> 32) != 0)
                return 0;

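        /*
         * The erratum cleared the upper 32 bits of RIP; kernel text
         * addresses have them all set. Restore them and see whether
         * the result lands in kernel or module text:
         */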
        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
                printk_once(errata93_warning);
                regs->ip = address;
                return 1;
        }
#endif
        return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
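        /* Bit 2 of a selector is the TI bit: set means the segment is in the LDT: */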
        if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
                return 1;
#endif
        return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
        unsigned long nr;

        /*
         * Pentium F0 0F C7 C8 bug workaround:
         */
        if (boot_cpu_has_bug(X86_BUG_F00F)) {
                nr = (address - idt_descr.address) >> 3;

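                /* IDT vector 6 is #UD, the invalid opcode exception: */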
                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return 1;
                }
        }
#endif
        return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
        if (!oops_may_print())
                return;

        if (error_code & PF_INSTR) {
                unsigned int level;
                pgd_t *pgd;
                pte_t *pte;

                pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
                pgd += pgd_index(address);

                pte = lookup_address_in_pgd(pgd, address, &level);

                if (pte && pte_present(*pte) && !pte_exec(*pte))
                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
                        printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
        }

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %p\n", (void *) address);
        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);

        dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
            unsigned long address)
{
        struct task_struct *tsk;
        unsigned long flags;
        int sig;

        flags = oops_begin();
        tsk = current;
        sig = SIGKILL;

        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
               tsk->comm, address);
        dump_pagetable(address);

        tsk->thread.cr2         = address;
        tsk->thread.trap_nr     = X86_TRAP_PF;
        tsk->thread.error_code  = error_code;

        if (__die("Bad pagetable", regs, error_code))
                sig = 0;

        oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int signal, int si_code)
{
        struct task_struct *tsk = current;
        unsigned long flags;
        int sig;
        /* No context means no VMA to pass down */
        struct vm_area_struct *vma = NULL;

        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs, X86_TRAP_PF)) {
                /*
                 * Any interrupt that takes a fault gets the fixup. This makes
                 * the below recursive fault logic only apply to faults from
                 * task context.
                 */
                if (in_interrupt())
                        return;

                /*
                 * Per the above we're !in_interrupt(), aka. task context.
                 *
                 * In this case we need to make sure we're not recursively
                 * faulting through the emulate_vsyscall() logic.
                 */
                if (current->thread.sig_on_uaccess_err && signal) {
                        tsk->thread.trap_nr = X86_TRAP_PF;
                        tsk->thread.error_code = error_code | PF_USER;
                        tsk->thread.cr2 = address;

                        /* XXX: hwpoison faults will set the wrong code. */
                        force_sig_info_fault(signal, si_code, address,
                                             tsk, vma, 0);
                }

                /*
                 * Barring that, we can do the fixup and be happy.
                 */
                return;
        }

#ifdef CONFIG_VMAP_STACK
        /*
         * Stack overflow? During boot, we can fault near the initial
         * stack in the direct map, but that's not an overflow -- check
         * that we're in vmalloc space to avoid this.
         */
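        /* Did the fault hit within a page of either end of the task's stack? */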
        if (is_vmalloc_addr((void *)address) &&
            (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
             address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
                register void *__sp asm("rsp");
                unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
                /*
                 * We're likely to be running with very little stack space
                 * left. It's plausible that we'd hit this condition but
                 * double-fault even before we get this far, in which case
                 * we're fine: the double-fault handler will deal with it.
                 *
                 * We don't want to make it all the way into the oops code
                 * and then double-fault, though, because we're likely to
                 * break the console driver and lose most of the stack dump.
                 */
                asm volatile ("movq %[stack], %%rsp\n\t"
                              "call handle_stack_overflow\n\t"
                              "1: jmp 1b"
                              : "+r" (__sp)
                              : "D" ("kernel stack overflow (page fault)"),
                                "S" (regs), "d" (address),
                                [stack] "rm" (stack));
                unreachable();
        }
#endif

        /*
         * 32-bit:
         *
         *   Valid to do another page fault here, because if this fault
         *   had been triggered by is_prefetch fixup_exception would have
         *   handled it.
         *
         * 64-bit:
         *
         *   Hall of shame of CPU/BIOS bugs.
         */
        if (is_prefetch(regs, error_code, address))
                return;

        if (is_errata93(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice:
         */
        flags = oops_begin();

        show_fault_oops(regs, error_code, address);

        if (task_stack_end_corrupted(tsk))
                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

        tsk->thread.cr2         = address;
        tsk->thread.trap_nr     = X86_TRAP_PF;
        tsk->thread.error_code  = error_code;

        sig = SIGKILL;
        if (__die("Oops", regs, error_code))
                sig = 0;

        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_DEFAULT "CR2: %016lx\n", address);

        oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);

        print_vma_addr(KERN_CONT " in ", regs->ip);

        printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, struct vm_area_struct *vma,
                       int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (error_code & PF_USER) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                /*
                 * Valid to do another page fault here because this one came
                 * from user space:
                 */
                if (is_prefetch(regs, error_code, address))
                        return;

                if (is_errata100(regs, address))
                        return;

#ifdef CONFIG_X86_64
                /*
                 * Instruction fetch faults in the vsyscall page might need
                 * emulation.
                 */
                if (unlikely((error_code & PF_INSTR) &&
                             ((address & ~0xfff) == VSYSCALL_ADDR))) {
                        if (emulate_vsyscall(regs, address))
                                return;
                }
#endif

                /*
                 * To avoid leaking information about the kernel page table
                 * layout, pretend that user-mode accesses to kernel addresses
                 * are always protection faults.
                 */
                if (address >= TASK_SIZE_MAX)
                        error_code |= PF_PROT;

                if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);

                tsk->thread.cr2         = address;
                tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;

                force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);

                return;
        }

        if (is_f00f_bug(regs, address))
                return;

        no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address, struct vm_area_struct *vma)
{
        __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, struct vm_area_struct *vma, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
                struct vm_area_struct *vma)
{
        /* This code is always called on the current mm */
        bool foreign = false;

        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return false;
        if (error_code & PF_PK)
                return true;
        /* this checks permission keys on the VMA: */
        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
                                       (error_code & PF_INSTR), foreign))
                return true;
        return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address, struct vm_area_struct *vma)
{
        /*
         * This OSPKE check is not strictly necessary at runtime.
         * But, doing it this way allows compiler optimizations
         * if pkeys are compiled out.
         */
        if (bad_area_access_from_pkeys(error_code, vma))
                __bad_area(regs, error_code, address, vma, SEGV_PKUERR);
        else
                __bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
          struct vm_area_struct *vma, unsigned int fault)
{
        struct task_struct *tsk = current;
        int code = BUS_ADRERR;

        /* Kernel mode? Handle exceptions or die: */
        if (!(error_code & PF_USER)) {
                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                return;
        }

        /* User-space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
                return;

        tsk->thread.cr2         = address;
        tsk->thread.error_code  = error_code;
        tsk->thread.trap_nr     = X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                printk(KERN_ERR
                       "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                       tsk->comm, tsk->pid, address);
                code = BUS_MCEERR_AR;
        }
#endif
        force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, struct vm_area_struct *vma,
               unsigned int fault)
{
        if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
                no_context(regs, error_code, address, 0, 0);
                return;
        }

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!(error_code & PF_USER)) {
                        no_context(regs, error_code, address,
                                   SIGSEGV, SEGV_MAPERR);
                        return;
                }

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, vma, fault);
                else if (fault & VM_FAULT_SIGSEGV)
                        bad_area_nosemaphore(regs, error_code, address, vma);
                else
                        BUG();
        }
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
        if ((error_code & PF_WRITE) && !pte_write(*pte))
                return 0;

        if ((error_code & PF_INSTR) && !pte_exec(*pte))
                return 0;
        /*
         * Note: We do not do lazy flushing on protection key
         * changes, so no spurious fault will ever set PF_PK.
         */
        if ((error_code & PF_PK))
                return 1;

        return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry. Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        /*
         * Only writes to RO or instruction fetches from NX may cause
         * spurious faults.
         *
         * These could be from user or supervisor accesses but the TLB
         * is only lazily flushed after a kernel mapping protection
         * change, so user accesses are not expected to cause spurious
         * faults.
         */
        if (error_code != (PF_WRITE | PF_PROT)
            && error_code != (PF_INSTR | PF_PROT))
                return 0;

        pgd = init_mm.pgd + pgd_index(address);
        if (!pgd_present(*pgd))
                return 0;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                return 0;

        if (p4d_large(*p4d))
                return spurious_fault_check(error_code, (pte_t *) p4d);

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                return 0;

        if (pud_large(*pud))
                return spurious_fault_check(error_code, (pte_t *) pud);

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);

        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;

        ret = spurious_fault_check(error_code, pte);
        if (!ret)
                return 0;

        /*
         * Make sure we have permissions in PMD.
         * If not, then there's a bug in the page tables:
         */
        ret = spurious_fault_check(error_code, (pte_t *) pmd);
        WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

        return ret;
}
NOKPROBE_SYMBOL(spurious_fault);

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
        /* This is only called for the current mm, so: */
        bool foreign = false;

        /*
         * Read or write was blocked by protection keys. This is
         * always an unconditional error and can never result in
         * a follow-up action to resolve the fault, like a COW.
         */
        if (error_code & PF_PK)
                return 1;

        /*
         * Make sure to check the VMA so that we do not perform
         * faults just to hit a PF_PK as soon as we fill in a
         * page.
         */
        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
                                       (error_code & PF_INSTR), foreign))
                return 1;

        if (error_code & PF_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* read, present: */
        if (unlikely(error_code & PF_PROT))
                return 1;

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
        if (!IS_ENABLED(CONFIG_X86_SMAP))
                return false;

        if (!static_cpu_has(X86_FEATURE_SMAP))
                return false;

        if (error_code & PF_USER)
                return false;

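        /*
         * In kernel mode, EFLAGS.AC set means we are inside a uaccess
         * region (between stac() and clac()), where SMAP deliberately
         * permits accesses to user pages:
         */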
        if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
                return false;

        return true;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Having this an actual function
 * guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, major = 0;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        tsk = current;
        mm = tsk->mm;

        /*
         * Detect and handle instructions that would cause a page fault for
         * both a tracked kernel page and a userspace page.
         */
        if (kmemcheck_active(regs))
                kmemcheck_hide(regs);
        prefetchw(&mm->mmap_sem);

        if (unlikely(kmmio_fault(regs, address)))
                return;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
                        if (vmalloc_fault(address) >= 0)
                                return;

                        if (kmemcheck_fault(regs, address, error_code))
                                return;
                }

                /* Can handle a stale RO->RW TLB: */
                if (spurious_fault(error_code, address))
                        return;

                /* kprobes don't want to hook the spurious faults: */
                if (kprobes_fault(regs))
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock:
                 */
                bad_area_nosemaphore(regs, error_code, address, NULL);

                return;
        }

        /* kprobes don't want to hook the spurious faults: */
        if (unlikely(kprobes_fault(regs)))
                return;

        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);

        if (unlikely(smap_violation(error_code, regs))) {
                bad_area_nosemaphore(regs, error_code, address, NULL);
                return;
        }

        /*
         * If we're in an interrupt, have no user context or are running
         * in a region with pagefaults disabled then we must not take the fault
         */
        if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address, NULL);
                return;
        }

        /*
         * It's safe to allow irq's after cr2 has been saved and the
         * vmalloc fault has been handled.
         *
         * User-mode registers count as a user access even for any
         * potential system fault or CPU buglet:
         */
        if (user_mode(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
                flags |= FAULT_FLAG_USER;
        } else {
                if (regs->flags & X86_EFLAGS_IF)
                        local_irq_enable();
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (error_code & PF_WRITE)
                flags |= FAULT_FLAG_WRITE;
        if (error_code & PF_INSTR)
                flags |= FAULT_FLAG_INSTRUCTION;

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space. All other faults represent errors in
         * the kernel and should generate an OOPS. Unfortunately, in the
         * case of an erroneous fault occurring in a code path which already
         * holds mmap_sem we will deadlock attempting to validate the fault
         * against the address space. Luckily the kernel only validly
         * references user space from well defined areas of code, which are
         * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock. Attempt to lock the address space, if we cannot we then
         * validate the source. If this is invalid we can skip the address
         * space check, thus avoiding the deadlock:
         */
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if ((error_code & PF_USER) == 0 &&
                    !search_exception_tables(regs->ip)) {
                        bad_area_nosemaphore(regs, error_code, address, NULL);
                        return;
                }
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, error_code, address);
                return;
        }
        if (error_code & PF_USER) {
                /*
                 * Accessing the stack below %sp is always a bug.
                 * The large cushion allows instructions like enter
                 * and pusha to work. ("enter $65535, $31" pushes
                 * 32 pointers and then decrements %sp by 65535.)
                 */
                if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
                        bad_area(regs, error_code, address);
                        return;
                }
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address, vma);
                return;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
         * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
         */
        fault = handle_mm_fault(vma, address, flags);
        major |= fault & VM_FAULT_MAJOR;

        /*
         * If we need to retry the mmap_sem has already been released,
         * and if there is a fatal signal pending there is no guarantee
         * that we made any progress. Handle this case first.
         */
        if (unlikely(fault & VM_FAULT_RETRY)) {
                /* Retry at most once */
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        if (!fatal_signal_pending(tsk))
                                goto retry;
                }

                /* User mode? Just return to handle the fatal exception */
                if (flags & FAULT_FLAG_USER)
                        return;

                /* Not returning to user mode? Handle exceptions or die: */
                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                return;
        }

        up_read(&mm->mmap_sem);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, error_code, address, vma, fault);
                return;
        }

        /*
         * Major/minor page fault accounting. If any of the events
         * returned VM_FAULT_MAJOR, we account it as a major fault.
         */
        if (major) {
                tsk->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                tsk->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }

        check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);

dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        unsigned long address = read_cr2(); /* Get the faulting address */
        enum ctx_state prev_state;

        /*
         * We must have this function tagged with __kprobes, notrace and call
         * read_cr2() before calling anything else, to avoid calling any kind
         * of tracing machinery before we've observed the CR2 value.
         *
         * exception_{enter,exit}() contain all sorts of tracepoints.
         */

        prev_state = exception_enter();
        __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
                         unsigned long error_code)
{
        if (user_mode(regs))
                trace_page_fault_user(address, regs, error_code);
        else
                trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        /*
         * The exception_enter and tracepoint processing could
         * trigger other page faults (user space callchain
         * reading) and destroy the original cr2 value, so read
         * the faulting address now.
         */
        unsigned long address = read_cr2();
        enum ctx_state prev_state;

        prev_state = exception_enter();
        trace_page_fault_entries(address, regs, error_code);
        __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */