// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

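/*
 * test_facility(75) presumably checks for the fetch/store-indication
 * facility: when it is installed, bits 52-53 of the translation-exception
 * identification (TEID) are valid, and do_exception() uses the 0xc00 mask
 * set below to treat the 0x400 bit pattern as a store (write) access.
 */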
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
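/*
 * The two low-order TEID bits in int_parm_long select the address space of
 * the failing access: 0 = primary, 1 = access register, 2 = secondary,
 * 3 = home (compare the switch in dump_fault_info() below).
 */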
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

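/*
 * Walk the page table designated by @asce for @address and print one entry
 * per level (region first/second/third, segment, page). The walk stops early
 * at invalid or large entries; entries that cannot be read print "BAD".
 */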
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_amode31_ex_table,
			       __stop_amode31_ex_table - __start_amode31_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
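		/*
		 * Let KFENCE have a look first: it returns true if the
		 * address belongs to its pool and the fault has already
		 * been handled (or reported) there.
		 */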
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || is_write)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

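	/*
	 * For guest (gmap) faults, record the guest address and translate it
	 * through the guest mapping to the host userspace address the fault
	 * is actually handled against. If the guest has pfault enabled, the
	 * fault may be handled asynchronously (FAULT_FLAG_RETRY_NOWAIT).
	 */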
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
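	/*
	 * The 0x80 TEID bit is taken here to be the instruction-fetch
	 * indication (an assumption): with the instruction-execution-
	 * protection facility (MACHINE_HAS_NX) the fault was an attempt to
	 * execute from a non-executable page, so report it as a failed
	 * VM_EXEC access instead of handling it as a write fault.
	 */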
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

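/*
 * Parameter block ("refbk") passed to DIAGNOSE 0x258. reffcode selects the
 * function: 0 is used by pfault_init() to establish the pfault token, 1 by
 * pfault_fini() to cancel it. The remaining fields are filled in exactly as
 * the two initializers below show.
 */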
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

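/*
 * Establish the pfault token via DIAGNOSE 0x258 (function code 0). Returns
 * -1 when pfault handling is disabled, otherwise the diag return code; the
 * exception table entry turns a faulting diag (no hypervisor support) into
 * a return value of 8.
 */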
int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

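/* Bit in the external-interrupt subcode that marks a completion interrupt,
 * i.e. the host signals that the missing page has been brought in. */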
#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	/*
	 * bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

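/*
 * Presumably raised when a protected (secure) guest accesses a page that is
 * not currently secure: try to convert the guest page to secure again and
 * send SIGSEGV if the conversion is impossible.
 */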
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */