// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
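/*
 * Layout notes (summary, not authoritative): the translation-exception
 * identification (TEID) passed in int_parm_long carries the failing address
 * in its page-aligned part, which __FAIL_ADDR_MASK extracts; the low bits
 * hold status flags. __SUBCODE_MASK is the expected high byte of the pfault
 * external-interrupt subcode, and __PF_RES_FIELD is placed into the reserved
 * doubleword of the pfault refbk set up further down in this file.
 */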

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

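/*
 * Source of a fault, as classified by get_fault_type() below: faults against
 * the kernel address space, the user address space, the vdso page(s), or -
 * for KVM guests - the guest mapping (gmap).
 */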
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
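/*
 * Note (best-effort summary): facility 75 is the access-exception
 * fetch/store-indication facility. When it is installed, the TEID bits
 * selected by store_indication (0xc00) tell whether the faulting access
 * was a store; do_exception() uses this to set FAULT_FLAG_WRITE without
 * having to decode the faulting instruction.
 */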

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Find out which address space caused the exception.
 * Access register mode is impossible, ignore space == 3.
 */
static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

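/*
 * Walk the page table that the given ASCE points to and print one entry per
 * translation level for the given address; the walk stops early at an
 * invalid or large (huge) entry, or when an entry cannot be read.
 */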
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV, 1);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

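/*
 * Check whether the faulting instruction is one of the two sigreturn
 * system calls (0x0a77 = svc 119/sigreturn, 0x0aad = svc 173/rt_sigreturn,
 * per the s390 svc encoding). With instruction-execution protection the
 * signal trampoline would otherwise fault with an execute access;
 * rewriting it into the corresponding system call lets the signal return
 * proceed. (Descriptive summary of the function below.)
 */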
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

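	/*
	 * For faults raised while running a KVM guest (GMAP_FAULT) the guest
	 * address is translated to the corresponding user address via the
	 * gmap; if guest pfault handshaking is enabled, FAULT_FLAG_RETRY_NOWAIT
	 * keeps the fault from blocking so a pseudo page fault can be injected
	 * instead. (Descriptive summary of the block below.)
	 */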
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access, fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule is aborted transactions, for which
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
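	/*
	 * With the instruction-execution-protection facility (MACHINE_HAS_NX)
	 * a set 0x80 bit in the TEID marks the protection exception as an
	 * execute-type fault; it is reported against the PSW address as a
	 * VM_EXEC access. (Descriptive note for the check below.)
	 */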
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

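/*
 * pfault_init()/pfault_fini() issue DIAG 0x258 to enable respectively
 * disable pseudo-page-fault handshaking with the hypervisor (reffcode 0
 * enables, 1 disables). refgaddr = __LC_LPP asks the host to return the
 * LPP word as the token in later pfault interrupts, which is where
 * pfault_interrupt() gets the pid from. (Best-effort summary; the DIAG
 * 0x258 parameter block is exactly the refbk set up below.)
 */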
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_LPP,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest, runs a
 * user space process and the user space process accesses a page that the
 * host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

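/*
 * CPU-hotplug callback: when a cpu goes down, wake up every task that is
 * still parked on the pfault_list so that no task remains blocked across
 * the cpu going away.
 */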
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */