/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0  = __PT_GPRS
__PT_R1  = __PT_GPRS + 8
__PT_R2  = __PT_GPRS + 16
__PT_R3  = __PT_GPRS + 24
__PT_R4  = __PT_GPRS + 32
__PT_R5  = __PT_GPRS + 40
__PT_R6  = __PT_GPRS + 48
__PT_R7  = __PT_GPRS + 56
__PT_R8  = __PT_GPRS + 64
__PT_R9  = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
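#
# Illustrative note (not in the original source): with the usual s390
# values of PAGE_SHIFT = 12 and THREAD_SIZE_ORDER = 2, STACK_SHIFT is 14
# and STACK_SIZE is 16KB. STACK_INIT is the initial stack pointer offset:
# the top of the stack minus room for one register save area and one
# struct pt_regs.
#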

_TIF_WORK  = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
              _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
              _TIF_NOTIFY_SIGNAL)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
              _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK  = (_CIF_FPU)
_PIF_WORK  = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
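#
# Note (added for clarity): the three flag namespaces live in different
# places - _TIF_* bits are per-task flags in thread_info (__TI_flags),
# _CIF_* bits are per-cpu flags in the lowcore (__LC_CPU_FLAGS), and
# _PIF_* bits are per-interruption flags in struct pt_regs (__PT_FLAGS).
#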

_LPP_OFFSET = __LC_LPP

        .macro  TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_on_caller
#endif
        .endm

        .macro  TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_off_caller
#endif
        .endm

        .macro  LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jz      .+10
        brasl   %r14,lockdep_sys_exit
#endif
        .endm

        .macro  CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
        tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        lghi    %r14,\savearea
        jz      stack_overflow
#endif
        .endm

        .macro  DEBUG_USER_ASCE
#ifdef CONFIG_DEBUG_USER_ASCE
        brasl   %r14,debug_user_asce
#endif
        .endm

        .macro  CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
        lgr     %r14,%r15
        nill    %r14,0x10000 - STACK_SIZE
        oill    %r14,STACK_INIT
        clg     %r14,__LC_KERNEL_STACK
        je      \oklabel
        clg     %r14,__LC_ASYNC_STACK
        je      \oklabel
        clg     %r14,__LC_NODAT_STACK
        je      \oklabel
        clg     %r14,__LC_RESTART_STACK
        je      \oklabel
        lghi    %r14,\savearea
        j       stack_overflow
#else
        j       \oklabel
#endif
        .endm

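#
# Summary (added for clarity): SWITCH_ASYNC sets up an interrupt handler
# to run on the async stack. Coming from user space it accounts the
# elapsed user time and switches to the async stack unconditionally.
# Coming from kernel code it first cleans up an interrupted SIE critical
# section (KVM), finishes the idle-time accounting if psw_idle was
# interrupted, and switches stacks only if not already on the async
# stack. In all cases %r11 ends up pointing to the pt_regs area.
#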
        .macro  SWITCH_ASYNC savearea,timer,clock
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     4f
#if IS_ENABLED(CONFIG_KVM)
        lgr     %r14,%r9
        larl    %r13,.Lsie_gmap
        slgr    %r14,%r13
        lghi    %r13,.Lsie_done - .Lsie_gmap
        clgr    %r14,%r13
        jhe     0f
        lghi    %r11,\savearea          # inside critical section, do cleanup
        brasl   %r14,.Lcleanup_sie
#endif
0:      larl    %r13,.Lpsw_idle_exit
        cgr     %r13,%r9
        jne     3f

        larl    %r1,smp_cpu_mtid
        llgf    %r1,0(%r1)
        ltgr    %r1,%r1
        jz      2f                      # no SMT, skip mt_cycles calculation
        .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
        larl    %r3,mt_cycles
        ag      %r3,__LC_PERCPU_OFFSET
        la      %r4,__SF_EMPTY+16(%r15)
1:      lg      %r0,0(%r3)
        slg     %r0,0(%r4)
        alg     %r0,64(%r4)
        stg     %r0,0(%r3)
        la      %r3,8(%r3)
        la      %r4,8(%r4)
        brct    %r1,1b

2:      mvc     __CLOCK_IDLE_EXIT(8,%r2),\clock
        mvc     __TIMER_IDLE_EXIT(8,%r2),\timer
        # account system time going idle
        ni      __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

        lg      %r13,__LC_STEAL_TIMER
        alg     %r13,__CLOCK_IDLE_ENTER(%r2)
        slg     %r13,__LC_LAST_UPDATE_CLOCK
        stg     %r13,__LC_STEAL_TIMER

        mvc     __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

        lg      %r13,__LC_SYSTEM_TIMER
        alg     %r13,__LC_LAST_UPDATE_TIMER
        slg     %r13,__TIMER_IDLE_ENTER(%r2)
        stg     %r13,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

        nihh    %r8,0xfcfd              # clear wait state and irq bits
3:      lg      %r14,__LC_ASYNC_STACK   # are we already on the target stack?
        slgr    %r14,%r15
        srag    %r14,%r14,STACK_SHIFT
        jnz     5f
        CHECK_STACK \savearea
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       6f
4:      UPDATE_VTIME %r14,%r15,\timer
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
5:      lg      %r15,__LC_ASYNC_STACK   # load async stack
6:      la      %r11,STACK_FRAME_OVERHEAD(%r15)
        .endm

        .macro  UPDATE_VTIME w1,w2,enter_timer
        lg      \w1,__LC_EXIT_TIMER
        lg      \w2,__LC_LAST_UPDATE_TIMER
        slg     \w1,\enter_timer
        slg     \w2,__LC_EXIT_TIMER
        alg     \w1,__LC_USER_TIMER
        alg     \w2,__LC_SYSTEM_TIMER
        stg     \w1,__LC_USER_TIMER
        stg     \w2,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
        .endm
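#
# Note (added for clarity): the CPU timer counts down, so "older value
# minus newer value" yields elapsed time. The macro accounts
# __LC_EXIT_TIMER - \enter_timer (time since the last exit to user
# space) as user time and __LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER as
# system time, then makes \enter_timer the new last-update value.
#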

        .macro  RESTORE_SM_CLEAR_PER
        stg     %r8,__LC_RETURN_PSW
        ni      __LC_RETURN_PSW,0xbf
        ssm     __LC_RETURN_PSW
        .endm

        .macro  ENABLE_INTS
        stosm   __SF_EMPTY(%r15),3
        .endm

        .macro  ENABLE_INTS_TRACE
        TRACE_IRQS_ON
        ENABLE_INTS
        .endm

        .macro  DISABLE_INTS
        stnsm   __SF_EMPTY(%r15),0xfc
        .endm

        .macro  DISABLE_INTS_TRACE
        DISABLE_INTS
        TRACE_IRQS_OFF
        .endm

        .macro  STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
        .insn   s,0xb27c0000,\savearea  # store clock fast
#else
        .insn   s,0xb2050000,\savearea  # store clock
#endif
        .endm
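#
# Note (added for clarity): 0xb27c is the stckf (store clock fast)
# opcode, available from z9 onwards; it avoids the serialization done
# by the plain stck (0xb205). Both store an 8-byte TOD clock value
# into \savearea.
#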

/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
        .macro  TSTMSK addr, mask, size=8, bytepos=0
        .if (\bytepos < \size) && (\mask >> 8)
        .if (\mask & 0xff)
        .error "Mask exceeds byte boundary"
        .endif
        TSTMSK  \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
        .exitm
        .endif
        .ifeq \mask
        .error "Mask must not be zero"
        .endif
        off = \size - \bytepos - 1
        tm      off+\addr, \mask
        .endm
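#
# Example (added for illustration): the mask must hit a single byte of
# the \size-byte field, so "TSTMSK __LC_CPU_FLAGS,0x04" resolves to
# "tm __LC_CPU_FLAGS+7,0x04", while a mask of 0x0400 recurses once and
# resolves to "tm __LC_CPU_FLAGS+6,0x04".
#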

        .macro  BPOFF
        ALTERNATIVE "", ".long 0xb2e8c000", 82
        .endm

        .macro  BPON
        ALTERNATIVE "", ".long 0xb2e8d000", 82
        .endm

        .macro  BPENTER tif_ptr,tif_mask
        ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
                    "", 82
        .endm

        .macro  BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
        ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
                    "jnz .+8; .long 0xb2e8d000", 82
        .endm
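#
# Note (added for clarity; an interpretation of the opcodes): 0xb2e8 is
# the ppa (perform processor assist) instruction; the hand-coded .long
# constants select the branch-prediction control functions used as
# spectre mitigation. Alternative number 82 is a facility bit the kernel
# sets itself at boot when this mitigation is enabled, so the
# instructions become no-ops otherwise.
#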

        GEN_BR_THUNK %r9
        GEN_BR_THUNK %r14
        GEN_BR_THUNK %r14,%r11

        .section .kprobes.text, "ax"
.Ldummy:
        /*
         * This nop exists only to avoid having __switch_to start at the
         * beginning of the kprobes text section. Otherwise there would be
         * several symbols at the same address, and e.g. objdump would pick
         * an arbitrary one when disassembling this code. With the nop in
         * between, the __switch_to symbol is unique again.
         */
        nop     0

ENTRY(__bpon)
        .globl __bpon
        BPON
        BR_EX   %r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        lghi    %r4,__TASK_stack
        lghi    %r1,__TASK_thread
        llill   %r5,STACK_INIT
        stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lg      %r15,0(%r4,%r3)                 # start of kernel stack of next
        agr     %r15,%r5                        # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
        aghi    %r3,__TASK_pid
        mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        BR_EX   %r14
ENDPROC(__switch_to)
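#
# Note (added for clarity): when facility 40 is installed, the
# ALTERNATIVE above emits an lpp (load program parameter, opcode
# 0xb280) instruction that loads the program parameter from
# _LPP_OFFSET in the lowcore on every task switch.
#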

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
        stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
        lg      %r12,__LC_CURRENT
        stg     %r2,__SF_SIE_CONTROL(%r15)      # save control block pointer
        stg     %r3,__SF_SIE_SAVEAREA(%r15)     # save guest register save area
        xc      __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
        mvc     __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU         # load guest fp/vx registers ?
        jno     .Lsie_load_guest_gprs
        brasl   %r14,load_fpu_regs              # load guest fp/vx regs
.Lsie_load_guest_gprs:
        lmg     %r0,%r13,0(%r3)                 # load guest gprs 0-13
        lg      %r14,__LC_GMAP                  # get gmap pointer
        ltgr    %r14,%r14
        jz      .Lsie_gmap
        lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
.Lsie_gmap:
        lg      %r14,__SF_SIE_CONTROL(%r15)     # get control block pointer
        oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
        tm      __SIE_PROG20+3(%r14),3          # last exit...
        jnz     .Lsie_skip
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lsie_skip                      # exit if fp/vx regs changed
        BPEXIT  __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
        sie     0(%r14)
        BPOFF
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
        ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
        lctlg   %c1,%c1,__LC_KERNEL_ASCE        # load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
        nopr    7
.Lrewind_pad4:
        nopr    7
.Lrewind_pad2:
        nopr    7
        .globl sie_exit
sie_exit:
        lg      %r14,__SF_SIE_SAVEAREA(%r15)    # load guest register save area
        stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
        xgr     %r0,%r0                         # clear guest registers to
        xgr     %r1,%r1                         # prevent speculative use
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
        lg      %r2,__SF_SIE_REASON(%r15)       # return exit reason code
        BR_EX   %r14
.Lsie_fault:
        lghi    %r14,-EFAULT
        stg     %r14,__SF_SIE_REASON(%r15)      # set exit reason code
        j       sie_exit

        EX_TABLE(.Lrewind_pad6,.Lsie_fault)
        EX_TABLE(.Lrewind_pad4,.Lsie_fault)
        EX_TABLE(.Lrewind_pad2,.Lsie_fault)
        EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
        stpt    __LC_SYNC_ENTER_TIMER
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lg      %r12,__LC_CURRENT
        lghi    %r14,_PIF_SYSCALL
.Lsysc_per:
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        lghi    %r13,__TASK_thread
        lg      %r15,__LC_KERNEL_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
        UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        stmg    %r0,%r7,__PT_R0(%r11)
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
        mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
        stg     %r14,__PT_FLAGS(%r11)
        ENABLE_INTS
.Lsysc_do_svc:
        # clear user controlled register to prevent speculative use
        xgr     %r0,%r0
        # load address of system call table
        lg      %r10,__THREAD_sysc_table(%r13,%r12)
        llgh    %r8,__PT_INT_CODE+2(%r11)
        slag    %r8,%r8,3               # shift and test for svc 0
        jnz     .Lsysc_nr_ok
        # svc 0: system call number in %r1
        llgfr   %r1,%r1                 # clear high word in r1
        sth     %r1,__PT_INT_CODE+2(%r11)
        cghi    %r1,NR_syscalls
        jnl     .Lsysc_nr_ok
        slag    %r8,%r1,3
.Lsysc_nr_ok:
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stg     %r2,__PT_ORIG_GPR2(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r9,0(%r8,%r10)         # get system call address
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
        jnz     .Lsysc_tracesys
        BASR_EX %r14,%r9                # call sys_xxxx
        stg     %r2,__PT_R2(%r11)       # store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
        lgr     %r2,%r11
        brasl   %r14,rseq_syscall
#endif
        LOCKDEP_SYS_EXIT
.Lsysc_tif:
        DISABLE_INTS
        TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
        jnz     .Lsysc_work
        TSTMSK  __TI_flags(%r12),_TIF_WORK
        jnz     .Lsysc_work             # check for work
        DEBUG_USER_ASCE
        lctlg   %c1,%c1,__LC_USER_ASCE
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jz      .Lsysc_skip_fpu
        brasl   %r14,load_fpu_regs
.Lsysc_skip_fpu:
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        stpt    __LC_EXIT_TIMER
        lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
        ENABLE_INTS
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jo      .Lsysc_reschedule
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
        jo      .Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
        TSTMSK  __TI_flags(%r12),_TIF_UPROBE
        jo      .Lsysc_uprobe_notify
#endif
        TSTMSK  __TI_flags(%r12),_TIF_GUARDED_STORAGE
        jo      .Lsysc_guarded_storage
        TSTMSK  __PT_FLAGS(%r11),_PIF_PER_TRAP
        jo      .Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
        TSTMSK  __TI_flags(%r12),_TIF_PATCH_PENDING
        jo      .Lsysc_patch_pending    # handle live patching just before
                                        # signals and possible syscall restart
#endif
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
        jo      .Lsysc_syscall_restart
        TSTMSK  __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
        jnz     .Lsysc_sigpending
        TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
        jo      .Lsysc_notify_resume
        j       .Lsysc_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
        larl    %r14,.Lsysc_return
        jg      schedule

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_signal
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL
        jno     .Lsysc_return
.Lsysc_do_syscall:
        lghi    %r13,__TASK_thread
        lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
        lghi    %r1,0                   # svc 0 returns -ENOSYS
        j       .Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
        lg      %r2,__LC_CURRENT        # pass pointer to task struct
        larl    %r14,.Lsysc_return
        jg      klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
        ni      __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
        ni      __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
        lmg     %r1,%r7,__PT_R1(%r11)   # load svc arguments
        lg      %r2,__PT_ORIG_GPR2(%r11)
        j       .Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
        lgr     %r2,%r11                # pass pointer to pt_regs
        la      %r3,0
        llgh    %r0,__PT_INT_CODE+2(%r11)
        stg     %r0,__PT_R2(%r11)
        brasl   %r14,do_syscall_trace_enter
        lghi    %r0,NR_syscalls
        clgr    %r0,%r2
        jnh     .Lsysc_tracenogo
        sllg    %r8,%r2,3
        lg      %r9,0(%r8,%r10)
        lmg     %r3,%r7,__PT_R3(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r2,__PT_ORIG_GPR2(%r11)
        BASR_EX %r14,%r9                # call sys_xxx
        stg     %r2,__PT_R2(%r11)       # store return value
.Lsysc_tracenogo:
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
        jz      .Lsysc_return
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        lg      %r12,__LC_CURRENT
        brasl   %r14,schedule_tail
        tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
        jne     .Lsysc_tracenogo
        # it's a kernel thread
        lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
        la      %r2,0(%r10)
        BASR_EX %r14,%r9
        j       .Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
        la      %r2,0(%r10)
        BASR_EX %r14,%r9
        j       .Lsysc_tracenogo
ENDPROC(kernel_thread_starter)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
        stpt    __LC_SYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        lg      %r10,__LC_LAST_BREAK
        srag    %r11,%r10,12
        jnz     0f
        /* If __LC_LAST_BREAK is < 4096, it contains one of
         * the lpswe addresses in lowcore. Set it to 1 (initial state)
         * to prevent leaking that address to userspace.
         */
        lghi    %r10,1
0:      lg      %r12,__LC_CURRENT
        lghi    %r11,0
        lmg     %r8,%r9,__LC_PGM_OLD_PSW
        tmhh    %r8,0x0001              # coming from user space?
        jno     .Lpgm_skip_asce
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        j       3f
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
        # cleanup critical section for program checks in sie64a
        lgr     %r14,%r9
        larl    %r13,.Lsie_gmap
        slgr    %r14,%r13
        lghi    %r13,.Lsie_done - .Lsie_gmap
        clgr    %r14,%r13
        jhe     1f
        lg      %r14,__SF_SIE_CONTROL(%r15)     # get control block pointer
        ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
        lctlg   %c1,%c1,__LC_KERNEL_ASCE        # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
        lghi    %r11,_PIF_GUEST_FAULT
#endif
1:      tmhh    %r8,0x4000              # PER bit set in old PSW ?
        jnz     2f                      # -> enabled, can't be a double fault
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jnz     .Lpgm_svcper            # -> single stepped svc
2:      CHECK_STACK __LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        # CHECK_VMAP_STACK branches to stack_overflow or 5f
        CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:      UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lg      %r15,__LC_KERNEL_STACK
        lgr     %r14,%r12
        aghi    %r14,__TASK_thread      # pointer to thread_struct
        lghi    %r13,__LC_PGM_TDB
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      4f
        mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
4:      stg     %r10,__THREAD_last_break(%r14)
5:      lgr     %r13,%r11
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_INT_CODE(4,%r11),__LC_PGM_ILC
        mvc     __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
        stg     %r13,__PT_FLAGS(%r11)
        stg     %r10,__PT_ARGS(%r11)
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jz      6f
        tmhh    %r8,0x0001              # kernel per event ?
        jz      .Lpgm_kprobe
        oi      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
        mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
        mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
        mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:      RESTORE_SM_CLEAR_PER
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        larl    %r1,pgm_check_table
        llgh    %r10,__PT_INT_CODE+2(%r11)
        nill    %r10,0x007f
        sll     %r10,3
        je      .Lpgm_return
        lg      %r9,0(%r10,%r1)         # load address of handler routine
        lgr     %r2,%r11                # pass pointer to pt_regs
        BASR_EX %r14,%r9                # branch to interrupt-handler
.Lpgm_return:
        LOCKDEP_SYS_EXIT
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     .Lpgm_restore
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL
        jo      .Lsysc_do_syscall
        j       .Lsysc_tif
.Lpgm_restore:
        DISABLE_INTS
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jz      .Lpgm_skip_fpu
        brasl   %r14,load_fpu_regs
.Lpgm_skip_fpu:
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        stpt    __LC_EXIT_TIMER
        lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
        RESTORE_SM_CLEAR_PER
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_per_trap
        j       .Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
        mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
        larl    %r14,.Lsysc_per
        stg     %r14,__LC_RETURN_PSW+8
        lghi    %r14,_PIF_SYSCALL | _PIF_PER_TRAP
        lpswe   __LC_RETURN_PSW         # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
        STCK    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_IO_OLD_PSW
        SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        tm      __PT_PSW+1(%r11),0x01   # coming from user space?
        jno     .Lio_skip_asce
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
.Lio_skip_asce:
        mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
        TRACE_IRQS_OFF
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,IO_INTERRUPT
        tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
        jz      .Lio_call
        lghi    %r3,THIN_INTERRUPT
.Lio_call:
        brasl   %r14,do_IRQ
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
        jz      .Lio_return
        tpi     0
        jz      .Lio_return
        mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
        j       .Lio_loop
.Lio_return:
        LOCKDEP_SYS_EXIT
        TSTMSK  __TI_flags(%r12),_TIF_WORK
        jnz     .Lio_work               # there is work to do (signals etc.)
        TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
        jnz     .Lio_work
.Lio_restore:
        TRACE_IRQS_ON
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     .Lio_exit_kernel
        DEBUG_USER_ASCE
        lctlg   %c1,%c1,__LC_USER_ASCE
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
.Lio_exit_kernel:
        lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jo      .Lio_work_user          # yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
        # check for preemptive scheduling
        icm     %r0,15,__LC_PREEMPT_COUNT
        jnz     .Lio_restore            # preemption is disabled
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jno     .Lio_restore
        # switch to kernel stack
        lg      %r1,__PT_R15(%r11)
        aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        brasl   %r14,preempt_schedule_irq
        j       .Lio_return
#else
        j       .Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
        lg      %r1,__LC_KERNEL_STACK
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1

#
# One of the work bits is on. Find out which one.
#
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jo      .Lio_reschedule
#ifdef CONFIG_LIVEPATCH
        TSTMSK  __TI_flags(%r12),_TIF_PATCH_PENDING
        jo      .Lio_patch_pending
#endif
        TSTMSK  __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
        jnz     .Lio_sigpending
        TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
        jo      .Lio_notify_resume
        TSTMSK  __TI_flags(%r12),_TIF_GUARDED_STORAGE
        jo      .Lio_guarded_storage
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lio_vxrs
        j       .Lio_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
        larl    %r14,.Lio_return
        jg      load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,gs_load_bc_cb
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
        ENABLE_INTS_TRACE
        brasl   %r14,schedule           # call scheduler
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
        lg      %r2,__LC_CURRENT        # pass pointer to task struct
        larl    %r14,.Lio_return
        jg      klp_update_patch_state
#endif

#
# _TIF_SIGPENDING or _TIF_NOTIFY_SIGNAL is set, call do_signal
#
.Lio_sigpending:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_signal
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_notify_resume
        DISABLE_INTS_TRACE
        j       .Lio_return
ENDPROC(io_int_handler)

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
        STCK    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_EXT_OLD_PSW
        SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        tm      __PT_PSW+1(%r11),0x01   # coming from user space?
        jno     .Lext_skip_asce
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
.Lext_skip_asce:
        lghi    %r1,__LC_EXT_PARAMS2
        mvc     __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
        mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
        mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
        TRACE_IRQS_OFF
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,EXT_INTERRUPT
        brasl   %r14,do_IRQ
        j       .Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_exit
        stg     %r1,__SF_EMPTY+8(%r15)
        larl    %r1,smp_cpu_mtid
        llgf    %r1,0(%r1)
        ltgr    %r1,%r1
        jz      .Lpsw_idle_stcctm
        .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
        oi      __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
        BPON
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
        lpswe   __SF_EMPTY(%r15)
.Lpsw_idle_exit:
        BR_EX   %r14
ENDPROC(psw_idle)

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup ensures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger
 * a lazy restore of the register contents at return from io or a
 * system call.
 */
ENTRY(save_fpu_regs)
        stnsm   __SF_EMPTY(%r15),0xfc
        lg      %r2,__LC_CURRENT
        aghi    %r2,__TASK_thread
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lsave_fpu_regs_exit
        stfpc   __THREAD_FPU_fpc(%r2)
        lg      %r3,__THREAD_FPU_regs(%r2)
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        jz      .Lsave_fpu_regs_fp      # no -> store FP regs
        VSTM    %v0,%v15,0,%r3          # vstm 0,15,0(3)
        VSTM    %v16,%v31,256,%r3       # vstm 16,31,256(3)
        j       .Lsave_fpu_regs_done    # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
        std     0,0(%r3)
        std     1,8(%r3)
        std     2,16(%r3)
        std     3,24(%r3)
        std     4,32(%r3)
        std     5,40(%r3)
        std     6,48(%r3)
        std     7,56(%r3)
        std     8,64(%r3)
        std     9,72(%r3)
        std     10,80(%r3)
        std     11,88(%r3)
        std     12,96(%r3)
        std     13,104(%r3)
        std     14,112(%r3)
        std     15,120(%r3)
.Lsave_fpu_regs_done:
        oi      __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
        ssm     __SF_EMPTY(%r15)
        BR_EX   %r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup ensures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4 (used as scratch register)
 */
load_fpu_regs:
        stnsm   __SF_EMPTY(%r15),0xfc
        lg      %r4,__LC_CURRENT
        aghi    %r4,__TASK_thread
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jno     .Lload_fpu_regs_exit
        lfpc    __THREAD_FPU_fpc(%r4)
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        lg      %r4,__THREAD_FPU_regs(%r4)      # %r4 <- reg save area
        jz      .Lload_fpu_regs_fp              # -> no VX, load FP regs
        VLM     %v0,%v15,0,%r4
        VLM     %v16,%v31,256,%r4
        j       .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
        ld      0,0(%r4)
        ld      1,8(%r4)
        ld      2,16(%r4)
        ld      3,24(%r4)
        ld      4,32(%r4)
        ld      5,40(%r4)
        ld      6,48(%r4)
        ld      7,56(%r4)
        ld      8,64(%r4)
        ld      9,72(%r4)
        ld      10,80(%r4)
        ld      11,88(%r4)
        ld      12,96(%r4)
        ld      13,104(%r4)
        ld      14,112(%r4)
        ld      15,120(%r4)
.Lload_fpu_regs_done:
        ni      __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
        ssm     __SF_EMPTY(%r15)
        BR_EX   %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
        STCK    __LC_MCCK_CLOCK
        BPOFF
        la      %r1,4095                # validate r1
        spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # validate cpu timer
        sckc    __LC_CLOCK_COMPARATOR   # validate comparator
        lam     %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_MCK_OLD_PSW
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
        jo      .Lmcck_panic            # yes -> rest of mcck code invalid
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CR_VALID
        jno     .Lmcck_panic            # control registers invalid -> panic
        la      %r14,4095
        lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
        ptlb
        lg      %r11,__LC_MCESAD-4095(%r14)     # extended machine check save area
        nill    %r11,0xfc00             # MCESA_ORIGIN_MASK
        TSTMSK  __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
        jno     0f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_GS_VALID
        jno     0f
        .insn   rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:      l       %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_FC_VALID
        jo      0f
        sr      %r14,%r14
0:      sfpc    %r14
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        jo      0f
        lghi    %r14,__LC_FPREGS_SAVE_AREA
        ld      %f0,0(%r14)
        ld      %f1,8(%r14)
        ld      %f2,16(%r14)
        ld      %f3,24(%r14)
        ld      %f4,32(%r14)
        ld      %f5,40(%r14)
        ld      %f6,48(%r14)
        ld      %f7,56(%r14)
        ld      %f8,64(%r14)
        ld      %f9,72(%r14)
        ld      %f10,80(%r14)
        ld      %f11,88(%r14)
        ld      %f12,96(%r14)
        ld      %f13,104(%r14)
        ld      %f14,112(%r14)
        ld      %f15,120(%r14)
        j       1f
0:      VLM     %v0,%v15,0,%r11
        VLM     %v16,%v31,256,%r11
1:      lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
        jo      3f
        la      %r14,__LC_SYNC_ENTER_TIMER
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
        jl      0f
        la      %r14,__LC_ASYNC_ENTER_TIMER
0:      clc     0(8,%r14),__LC_EXIT_TIMER
        jl      1f
        la      %r14,__LC_EXIT_TIMER
1:      clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
        jl      2f
        la      %r14,__LC_LAST_UPDATE_TIMER
2:      spt     0(%r14)
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
3:      TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
        jno     .Lmcck_panic
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     4f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
        jno     .Lmcck_panic
4:      ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
        lghi    %r14,__LC_GPREGS_SAVE_AREA+64
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),0(%r14)
        stmg    %r8,%r9,__PT_PSW(%r11)
        la      %r14,4095
        mvc     __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,s390_do_machine_check
        cghi    %r2,0
        je      .Lmcck_return
        lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        TRACE_IRQS_OFF
        brasl   %r14,s390_handle_mcck
        TRACE_IRQS_ON
.Lmcck_return:
        lctlg   %c1,%c1,__PT_CR1(%r11)
        lmg     %r0,%r10,__PT_R0(%r11)
        mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
        tm      __LC_RETURN_MCCK_PSW+1,0x01     # returning to user ?
        jno     0f
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
0:      lmg     %r11,%r15,__PT_R11(%r11)
        b       __LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
        lg      %r15,__LC_NODAT_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        j       .Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        mvc     STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
        mvc     STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN     # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
        lg      %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                 # test source cpu address
        jm      1f                      # negative -> skip source stop
0:      sigp    %r4,%r3,SIGP_SENSE      # sigp sense to source cpu
        brc     10,0b                   # wait for status stored
1:      basr    %r14,%r1                # call function
        stap    __SF_EMPTY(%r15)        # store cpu address
        llgh    %r3,__SF_EMPTY(%r15)
2:      sigp    %r4,%r3,SIGP_STOP       # sigp stop to current cpu
        brc     2,2b
3:      j       3b
ENDPROC(restart_int_handler)

        .section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
        lg      %r15,__LC_NODAT_STACK   # change to panic stack
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_R8(64,%r11),0(%r14)
        stg     %r10,__PT_ORIG_GPR2(%r11)       # store last break to orig_gpr2
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        jg      kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_sie:
        cghi    %r11,__LC_SAVE_AREA_ASYNC       # is this a normal interrupt?
        je      1f
        larl    %r13,.Lsie_entry
        slgr    %r9,%r13
        larl    %r13,.Lsie_skip
        clgr    %r9,%r13
        jh      1f
        oi      __LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:      BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
        lg      %r9,__SF_SIE_CONTROL(%r15)      # get control block pointer
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        larl    %r9,sie_exit                    # skip forward to sie_exit
        BR_EX   %r14,%r11

#endif
        .section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
        .globl  sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
        .globl  sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif