]>
Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
54dfe5dd | 2 | * arch/s390/kernel/entry64.S |
1da177e4 LT |
3 | * S390 low-level entry points. |
4 | * | |
54dfe5dd | 5 | * Copyright (C) IBM Corp. 1999,2006 |
1da177e4 | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
25d83cbf HC |
7 | * Hartmut Penner (hp@de.ibm.com), |
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | |
77fa2245 | 9 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
1da177e4 LT |
10 | */ |
11 | ||
12 | #include <linux/sys.h> | |
13 | #include <linux/linkage.h> | |
2bc89b5e | 14 | #include <linux/init.h> |
1da177e4 LT |
15 | #include <asm/cache.h> |
16 | #include <asm/lowcore.h> | |
17 | #include <asm/errno.h> | |
18 | #include <asm/ptrace.h> | |
19 | #include <asm/thread_info.h> | |
0013a854 | 20 | #include <asm/asm-offsets.h> |
1da177e4 LT |
21 | #include <asm/unistd.h> |
22 | #include <asm/page.h> | |
23 | ||
# NOTE(review): this file is a git-blame table rendering of arch/s390/kernel/entry64.S;
# the "commit | line |" columns are viewer furniture, not assembler input.
# The SP_* symbols below name byte offsets into the pt_regs area that every
# entry path builds on the kernel stack: STACK_FRAME_OVERHEAD skips the
# standard s390 register save area, then __PT_* offsets (from asm-offsets)
# locate PSW, gprs %r0-%r15 (8 bytes each, hence the +8 steps), orig_gpr2,
# ilc and trap fields within struct pt_regs.
24 | /* | |
25 | * Stack layout for the system_call stack entry. | |
26 | * The first few entries are identical to the user_regs_struct. | |
27 | */ | |
25d83cbf HC |
28 | SP_PTREGS = STACK_FRAME_OVERHEAD |
29 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | |
30 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | |
31 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | |
32 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | |
33 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | |
34 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | |
35 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | |
36 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | |
37 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | |
38 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | |
39 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64 | |
40 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72 | |
41 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80 | |
42 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88 | |
43 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96 | |
44 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | |
45 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | |
46 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | |
47 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | |
48 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | |
49 | SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP | |
50 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | |
1da177e4 LT |
51 |
# Kernel stack size is (1 << THREAD_ORDER) pages.
52 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | |
53 | STACK_SIZE = 1 << STACK_SHIFT | |
54 | ||
# Work-flag masks checked before returning from a syscall (_TIF_WORK_SVC)
# and from an interrupt (_TIF_WORK_INT); tested against __TI_flags below.
753c4dd6 | 55 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54dfe5dd | 56 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
753c4dd6 | 57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54dfe5dd | 58 | _TIF_MCCK_PENDING)
1da177e4 LT |
59 |
# %r13 holds the address of system_call on all entry paths (literal pool base),
# so BASED(x) forms a %r13-relative address of x.
60 | #define BASED(name) name-system_call(%r13) | |
61 | ||
1f194a4c HC |
# IRQ-tracing hooks: with CONFIG_TRACE_IRQFLAGS these call into lockdep's
# hardirq tracing; otherwise they compile away to nothing.
# NOTE(review): all three macros clobber %r14 (brasl link register) — callers
# must not hold a live value in %r14 across them.
62 | #ifdef CONFIG_TRACE_IRQFLAGS
63 | .macro TRACE_IRQS_ON | |
64 | brasl %r14,trace_hardirqs_on | |
65 | .endm | |
66 | ||
67 | .macro TRACE_IRQS_OFF | |
68 | brasl %r14,trace_hardirqs_off | |
69 | .endm | |
523b44cf | 70 | |
411788ea HC |
# Report the IRQ state of the PSW saved on the stack (the state being
# restored), not the current one: bits 0x03 of the saved PSW byte = I/O +
# external interrupt mask.
71 | .macro TRACE_IRQS_CHECK
72 | tm SP_PSW(%r15),0x03 # irqs enabled? | |
73 | jz 0f | |
74 | brasl %r14,trace_hardirqs_on | |
75 | j 1f | |
76 | 0: brasl %r14,trace_hardirqs_off | |
77 | 1: | |
523b44cf | 78 | .endm
1f194a4c HC |
79 | #else
80 | #define TRACE_IRQS_ON | |
81 | #define TRACE_IRQS_OFF | |
411788ea HC |
82 | #define TRACE_IRQS_CHECK
83 | #endif | |
84 | ||
# Lockdep hook on the return-to-user path; only fires when the saved PSW
# problem-state bit (SP_PSW+1, 0x01) says we are going back to user mode.
85 | #ifdef CONFIG_LOCKDEP | |
86 | .macro LOCKDEP_SYS_EXIT | |
87 | tm SP_PSW+1(%r15),0x01 # returning to user ? | |
88 | jz 0f | |
89 | brasl %r14,lockdep_sys_exit | |
90 | 0: | |
91 | .endm | |
92 | #else | |
523b44cf | 93 | #define LOCKDEP_SYS_EXIT
1f194a4c HC |
94 | #endif
95 | ||
# STORE_TIMER: snapshot the CPU timer (stpt) into a lowcore slot; a no-op
# unless CONFIG_VIRT_CPU_ACCOUNTING is enabled.
25d83cbf | 96 | .macro STORE_TIMER lc_offset
1da177e4 LT |
97 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
98 | stpt \lc_offset | |
99 | #endif | |
100 | .endm | |
101 | ||
102 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
# UPDATE_VTIME: accumulate (\lc_from - \lc_to) into \lc_sum.
# Used to charge the elapsed CPU-timer delta to user or system time.
# Clobbers %r10.
25d83cbf | 103 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum
1da177e4 LT |
104 | lg %r10,\lc_from
105 | slg %r10,\lc_to | |
106 | alg %r10,\lc_sum | |
107 | stg %r10,\lc_sum | |
108 | .endm | |
109 | #endif | |
110 | ||
111 | /* | |
112 | * Register usage in interrupt handlers: | |
113 | * R9 - pointer to current task structure | |
114 | * R13 - pointer to literal pool | |
115 | * R14 - return register for function calls | |
116 | * R15 - kernel stack pointer | |
117 | */ | |
118 | ||
# SAVE_ALL_BASE: stash %r12-%r15 in a lowcore save area and set up %r13 as
# the literal-pool base (address of system_call) used by BASED().
25d83cbf | 119 | .macro SAVE_ALL_BASE savearea
1da177e4 LT |
120 | stmg %r12,%r15,\savearea
121 | larl %r13,system_call
122 | .endm | |
123 | ||
987ad70a MS |
# SAVE_ALL_SVC: stack setup for the SVC path. SVC always arrives from
# problem state, so the kernel stack is loaded unconditionally; %r12 keeps
# the lowcore address of the old PSW for CREATE_STACK_FRAME.
124 | .macro SAVE_ALL_SVC psworg,savearea
125 | la %r12,\psworg
126 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
127 | .endm | |
128 | ||
# SAVE_ALL_SYNC: stack setup for synchronous (program-check) entry. If we
# interrupted user mode, switch to the kernel stack; if we were already in
# kernel mode, optionally verify %r15 against the stack guard
# (CONFIG_CHECK_STACK). Note the GAS numeric local labels: with
# CHECK_STACK the "jz 2f" lands on the guard test, otherwise on the
# trailing "2:".
63b12246 | 129 | .macro SAVE_ALL_SYNC psworg,savearea
1da177e4 | 130 | la %r12,\psworg
1da177e4 LT |
131 | tm \psworg+1,0x01 # test problem state bit
132 | jz 2f # skip stack setup save
133 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
63b12246 MS |
134 | #ifdef CONFIG_CHECK_STACK
135 | j 3f
136 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
137 | jz stack_overflow
138 | 3:
139 | #endif
140 | 2:
141 | .endm
142 | ||
142 | ||
# SAVE_ALL_ASYNC: stack setup for asynchronous (I/O / external) entry.
# From user mode: load the async stack. From kernel mode: first run
# cleanup_critical if the interrupted PC lies inside the __critical_start/
# __critical_end window (entry/exit code that must not be interrupted
# half-done), then stay on the async stack if already there.
143 | .macro SAVE_ALL_ASYNC psworg,savearea
144 | la %r12,\psworg
1da177e4 LT |
145 | tm \psworg+1,0x01 # test problem state bit
146 | jnz 1f # from user -> load kernel stack
147 | clc \psworg+8(8),BASED(.Lcritical_end)
148 | jhe 0f
149 | clc \psworg+8(8),BASED(.Lcritical_start)
150 | jl 0f
151 | brasl %r14,cleanup_critical
6add9f7f | 152 | tm 1(%r12),0x01 # retest problem state after cleanup
1da177e4 LT |
153 | jnz 1f
# %r14 = (async_stack - %r15) >> STACK_SHIFT: zero iff %r15 already points
# into the async stack's frame.
154 | 0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
155 | slgr %r14,%r15
156 | srag %r14,%r14,STACK_SHIFT
157 | jz 2f
158 | 1: lg %r15,__LC_ASYNC_STACK # load async stack
1da177e4 LT |
159 | #ifdef CONFIG_CHECK_STACK
160 | j 3f
161 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
162 | jz stack_overflow
163 | 3:
164 | #endif
77fa2245 HC |
165 | 2:
166 | .endm
167 | ||
# CREATE_STACK_FRAME: carve a pt_regs + stack frame off %r15 and populate
# it: old PSW (from the lowcore slot %r12 points at), gprs %r0-%r11 directly,
# %r12-%r15 from the save area filled by SAVE_ALL_BASE, orig_gpr2 and the
# instruction-length code. Terminates the backchain with 0.
# NOTE(review): %r12 is reloaded with \psworg and then reused for the ILC —
# order of these stores is deliberate; do not reorder.
168 | .macro CREATE_STACK_FRAME psworg,savearea
25d83cbf HC |
169 | aghi %r15,-SP_SIZE # make room for registers & psw
170 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
1da177e4 LT |
171 | la %r12,\psworg
172 | stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
173 | icm %r12,12,__LC_SVC_ILC
174 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
175 | st %r12,SP_ILC(%r15)
176 | mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
177 | la %r12,0
178 | stg %r12,__SF_BACKCHAIN(%r15)
25d83cbf | 179 | .endm
1da177e4 | 180 |
1da177e4 | 180 | |
ae6aa2ea MS |
# RESTORE_ALL: return-to-caller sequence. Copies the saved PSW from the
# stack back to the lowcore, clears the wait-state bit for async returns
# (\sync == 0), reloads all 16 gprs from pt_regs, records the exit CPU
# timer, and leaves via lpswe. Everything after the lmg must not touch
# gprs other than through the lowcore.
181 | .macro RESTORE_ALL psworg,sync
182 | mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
1da177e4 | 183 | .if !\sync
ae6aa2ea | 184 | ni \psworg+1,0xfd # clear wait state bit
1da177e4 LT |
185 | .endif
186 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
187 | STORE_TIMER __LC_EXIT_TIMER
ae6aa2ea | 188 | lpswe \psworg # back to caller
1da177e4 LT |
189 | .endm
190 | ||
190 | ||
191 | /* | |
192 | * Scheduler resume function, called by switch_to | |
193 | * gpr2 = (task_struct *) prev | |
194 | * gpr3 = (task_struct *) next | |
195 | * Returns: | |
196 | * gpr2 = prev | |
197 | */ | |
# __switch_to(prev=%r2, next=%r3): context switch. Only reloads the PER
# control registers (%c9-%c11) when the new task uses PER and its values
# differ from the current ones; migrates a pending machine-check TIF flag
# from prev to next; swaps kernel stack pointers and updates the lowcore
# current/thread_info/kernel-stack slots. Returns prev in %r2 (untouched).
25d83cbf | 198 | .globl __switch_to
1da177e4 LT |
199 | __switch_to:
200 | tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
201 | jz __switch_to_noper # if not we're fine
25d83cbf HC |
202 | stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
203 | clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
204 | je __switch_to_noper # we got away without bashing TLB's
205 | lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
1da177e4 | 206 | __switch_to_noper:
25d83cbf | 207 | lg %r4,__THREAD_info(%r2) # get thread_info of prev
77fa2245 HC |
208 | tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
209 | jz __switch_to_no_mcck
210 | ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
211 | lg %r4,__THREAD_info(%r3) # get thread_info of next
212 | oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
213 | __switch_to_no_mcck:
25d83cbf | 214 | stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
1da177e4 LT |
215 | stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
216 | lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
25d83cbf | 217 | lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
1da177e4 LT |
218 | stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct
219 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
25d83cbf | 220 | lg %r3,__THREAD_info(%r3) # load thread_info from task struct
1da177e4 LT |
221 | stg %r3,__LC_THREAD_INFO
222 | aghi %r3,STACK_SIZE
223 | stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
224 | br %r14
225 | ||
225 | ||
# __critical_start marks the beginning of the region that cleanup_critical
# knows how to roll forward/back if an async interrupt or machine check
# lands inside it.
226 | __critical_start: | |
227 | /* | |
228 | * SVC interrupt handler routine. System calls are synchronous events and | |
229 | * are executed with interrupts enabled. | |
230 | */ | |
231 | ||
25d83cbf | 232 | .globl system_call
1da177e4 LT |
233 | system_call:
234 | STORE_TIMER __LC_SYNC_ENTER_TIMER | |
235 | sysc_saveall: | |
236 | SAVE_ALL_BASE __LC_SAVE_AREA | |
987ad70a | 237 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
25d83cbf HC |
238 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
239 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | |
1da177e4 LT |
240 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
241 | sysc_vtime: | |
1da177e4 LT |
242 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
243 | sysc_stime: | |
244 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
245 | sysc_update: | |
246 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
247 | #endif | |
# sysc_do_svc: %r7 = svc number * 4 (table entries are 4-byte offsets);
# slag also sets the condition code, so svc 0 (indirect syscall number in
# %r1) is detected for free.
248 | sysc_do_svc: | |
249 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
25d83cbf | 250 | slag %r7,%r7,2 # *4 and test for svc 0
1da177e4 LT |
251 | jnz sysc_nr_ok
252 | # svc 0: system call number in %r1 | |
253 | cl %r1,BASED(.Lnr_syscalls) | |
254 | jnl sysc_nr_ok | |
25d83cbf HC |
255 | lgfr %r7,%r1 # clear high word in r1
256 | slag %r7,%r7,2 # svc 0: system call number in %r1 | |
1da177e4 LT |
257 | sysc_nr_ok:
258 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | |
259 | sysc_do_restart: | |
25d83cbf | 260 | larl %r10,sys_call_table
347a8dc3 | 261 | #ifdef CONFIG_COMPAT
c563077e HC |
# 31-bit (compat) tasks dispatch through the emulation syscall table.
262 | tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
263 | jno sysc_noemu | |
25d83cbf | 264 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls
1da177e4 LT |
265 | sysc_noemu:
266 | #endif | |
267 | tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | |
25d83cbf HC |
268 | lgf %r8,0(%r7,%r10) # load address of system call routine
269 | jnz sysc_tracesys
270 | basr %r14,%r8 # call sys_xxxx
271 | stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
1da177e4 LT |
272 |
# sysc_return/sysc_restore: exit path. With IRQ tracing, the lpswe through
# sysc_restore_trace_psw re-enters at sysc_restore_trace with interrupts
# masked so TRACE_IRQS_CHECK can report the restored state safely.
273 | sysc_return: | |
1da177e4 | 274 | tm __TI_flags+7(%r9),_TIF_WORK_SVC
25d83cbf | 275 | jnz sysc_work # there is work to do (signals etc.)
411788ea HC |
276 | sysc_restore:
277 | #ifdef CONFIG_TRACE_IRQFLAGS
278 | larl %r1,sysc_restore_trace_psw
279 | lpswe 0(%r1)
280 | sysc_restore_trace:
281 | TRACE_IRQS_CHECK
523b44cf | 282 | LOCKDEP_SYS_EXIT
411788ea | 283 | #endif
1da177e4 | 284 | sysc_leave:
25d83cbf | 285 | RESTORE_ALL __LC_RETURN_PSW,1
411788ea HC |
286 | sysc_done:
287 | ||
288 | #ifdef CONFIG_TRACE_IRQFLAGS
289 | .align 8
290 | .globl sysc_restore_trace_psw
291 | sysc_restore_trace_psw:
292 | .quad 0, sysc_restore_trace
293 | #endif
1da177e4 LT |
294 | |
295 | # | |
296 | # recheck if there is more work to do | |
297 | # | |
298 | sysc_work_loop: | |
299 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | |
411788ea | 300 | jz sysc_restore # there is no work to do
1da177e4 LT |
301 | # |
302 | # One of the work bits is on. Find out which one. | |
303 | # | |
# Priority order of the checks below is deliberate: mcck first, then
# reschedule, signals, notify-resume, svc restart, single-step.
# Work is only handled when returning to user mode (SP_PSW+1 bit 0x01).
304 | sysc_work: | |
2688905e MS |
305 | tm SP_PSW+1(%r15),0x01 # returning to user ?
306 | jno sysc_restore
77fa2245 HC |
307 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
308 | jo sysc_mcck_pending
1da177e4 LT |
309 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
310 | jo sysc_reschedule
02a029b3 | 311 | tm __TI_flags+7(%r9),_TIF_SIGPENDING
54dfe5dd | 312 | jnz sysc_sigpending
753c4dd6 MS |
313 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
314 | jnz sysc_notify_resume
1da177e4 LT |
315 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC
316 | jo sysc_restart
317 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
318 | jo sysc_singlestep
411788ea HC |
319 | j sysc_restore
320 | sysc_work_done:
1da177e4 LT |
321 | |
322 | # |
323 | # _TIF_NEED_RESCHED is set, call schedule
25d83cbf HC |
324 | # |
# Tail-call pattern: %r14 is preloaded with the return address so the
# jg behaves like "call schedule; j sysc_work_loop".
325 | sysc_reschedule: |
326 | larl %r14,sysc_work_loop
327 | jg schedule # return point is sysc_return
1da177e4 | 328 |
77fa2245 HC |
329 | # |
330 | # _TIF_MCCK_PENDING is set, call handler
331 | # |
332 | sysc_mcck_pending: |
333 | larl %r14,sysc_work_loop
25d83cbf | 334 | jg s390_handle_mcck # TIF bit will be cleared by handler
77fa2245 | 335 |
1da177e4 | 336 | # |
02a029b3 | 337 | # _TIF_SIGPENDING is set, call do_signal
1da177e4 | 338 | # |
25d83cbf | 339 | sysc_sigpending: |
1da177e4 | 340 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
25d83cbf HC |
341 | la %r2,SP_PTREGS(%r15) # load pt_regs
342 | brasl %r14,do_signal # call do_signal
# do_signal may have set RESTART_SVC or SINGLE_STEP; re-dispatch those
# here before falling back to the generic work loop.
1da177e4 LT |
343 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC
344 | jo sysc_restart
345 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
346 | jo sysc_singlestep
e1c3ad96 | 347 | j sysc_work_loop
1da177e4 | 348 |
753c4dd6 MS |
349 | # |
350 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume
351 | # |
352 | sysc_notify_resume: |
353 | la %r2,SP_PTREGS(%r15) # load pt_regs
354 | larl %r14,sysc_work_loop
355 | jg do_notify_resume # call do_notify_resume
356 | ||
1da177e4 LT |
357 | # |
358 | # _TIF_RESTART_SVC is set, set up registers and restart svc
359 | # |
# Rebuild the syscall number (*4) and arguments from pt_regs and jump back
# into the dispatch path.
360 | sysc_restart: |
361 | ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
25d83cbf HC |
362 | lg %r7,SP_R2(%r15) # load new svc number
363 | slag %r7,%r7,2 # *4
1da177e4 | 364 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
25d83cbf HC |
365 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments
366 | j sysc_do_restart # restart svc
1da177e4 LT |
367 | |
368 | # |
369 | # _TIF_SINGLE_STEP is set, call do_single_step
370 | # |
# Fakes a program-check trap number in pt_regs so do_single_step sees the
# single-stepped SVC as a PER event.
371 | sysc_singlestep: |
372 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
373 | lhi %r0,__LC_PGM_OLD_PSW
374 | sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
375 | la %r2,SP_PTREGS(%r15) # address of register-save area
376 | larl %r14,sysc_return # load adr. of system return
377 | jg do_single_step # branch to do_sigtrap
378 | ||
378 | ||
1da177e4 | 379 | # |
753c4dd6 MS |
380 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
381 | # and after the system call | |
1da177e4 LT |
382 | # |
383 | sysc_tracesys: | |
25d83cbf | 384 | la %r2,SP_PTREGS(%r15) # load pt_regs |
1da177e4 LT |
385 | la %r3,0 |
386 | srl %r7,2 | |
25d83cbf | 387 | stg %r7,SP_R2(%r15) |
753c4dd6 | 388 | brasl %r14,do_syscall_trace_enter |
1da177e4 | 389 | lghi %r0,NR_syscalls |
753c4dd6 | 390 | clgr %r0,%r2 |
1da177e4 | 391 | jnh sysc_tracenogo |
753c4dd6 | 392 | slag %r7,%r2,2 # *4 |
1da177e4 LT |
393 | lgf %r8,0(%r7,%r10) |
394 | sysc_tracego: | |
25d83cbf HC |
395 | lmg %r3,%r6,SP_R3(%r15) |
396 | lg %r2,SP_ORIG_R2(%r15) | |
397 | basr %r14,%r8 # call sys_xxx | |
398 | stg %r2,SP_R2(%r15) # store return value | |
1da177e4 LT |
399 | sysc_tracenogo: |
400 | tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | |
25d83cbf HC |
401 | jz sysc_return |
402 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
25d83cbf | 403 | larl %r14,sysc_return # return point is sysc_return |
753c4dd6 | 404 | jg do_syscall_trace_exit |
1da177e4 LT |
405 | |
406 | # |
407 | # a new process exits the kernel with ret_from_fork
408 | # |
# First code a new task runs: re-establish %r13/%r9 (literal pool and
# thread_info), fix the saved %r15 for kernel threads, run schedule_tail,
# re-enable interrupts and join the normal syscall-exit path.
25d83cbf | 409 | .globl ret_from_fork
1da177e4 LT |
410 | ret_from_fork:
411 | lg %r13,__LC_SVC_NEW_PSW+8
412 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
413 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
414 | jo 0f
415 | stg %r15,SP_R15(%r15) # store stack pointer for new kthread
25d83cbf | 416 | 0: brasl %r14,schedule_tail
1f194a4c | 417 | TRACE_IRQS_ON
25d83cbf | 418 | stosm 24(%r15),0x03 # reenable interrupts
1da177e4 LT |
419 | j sysc_return
420 | ||
420 | ||
421 | # |
03ff9a23 MS |
422 | # kernel_execve function needs to deal with pt_regs that is not
423 | # at the usual place
1da177e4 | 424 | # |
03ff9a23 MS |
# Builds a zeroed pt_regs in a private stack frame for do_execve. On
# failure the frame is popped and we return to the caller; on success the
# pt_regs (now filled in for the new user context) are copied to the
# normal kernel-stack location with interrupts disabled, then execve_tail
# runs and we exit through sysc_return as if from a syscall.
425 | .globl kernel_execve
426 | kernel_execve:
427 | stmg %r12,%r15,96(%r15)
428 | lgr %r14,%r15
429 | aghi %r15,-SP_SIZE
430 | stg %r14,__SF_BACKCHAIN(%r15)
431 | la %r12,SP_PTREGS(%r15)
432 | xc 0(__PT_SIZE,%r12),0(%r12)
433 | lgr %r5,%r12
434 | brasl %r14,do_execve
435 | ltgfr %r2,%r2
436 | je 0f
437 | aghi %r15,SP_SIZE
438 | lmg %r12,%r15,96(%r15)
439 | br %r14
440 | # execve succeeded.
441 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
442 | lg %r15,__LC_KERNEL_STACK # load ksp
443 | aghi %r15,-SP_SIZE # make room for registers & psw
444 | lg %r13,__LC_SVC_NEW_PSW+8
445 | lg %r9,__LC_THREAD_INFO
446 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
447 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
448 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
449 | brasl %r14,execve_tail
450 | j sysc_return
1da177e4 LT |
451 | |
452 | /* |
453 | * Program check handler routine
454 | */ |
455 | ||
25d83cbf | 456 | .globl pgm_check_handler
1da177e4 LT |
457 | pgm_check_handler:
458 | /* |
459 | * First we need to check for a special case:
460 | * Single stepping an instruction that disables the PER event mask will
461 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
462 | * For a single stepped SVC the program check handler gets control after
463 | * the SVC new PSW has been loaded. But we want to execute the SVC first and
464 | * then handle the PER event. Therefore we update the SVC old PSW to point
465 | * to the pgm_check_handler and branch to the SVC handler after we checked
466 | * if we have to load the kernel stack register.
467 | * For every other possible cause for PER event without the PER mask set
468 | * we just ignore the PER event (FIXME: is there anything we have to do
469 | * for LPSW?).
470 | */ |
471 | STORE_TIMER __LC_SYNC_ENTER_TIMER
472 | SAVE_ALL_BASE __LC_SAVE_AREA
25d83cbf HC |
# Bit 0x80 in the second interruption-code byte indicates a PER event.
473 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
474 | jnz pgm_per # got per exception -> special case
63b12246 | 475 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
77fa2245 | 476 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
1da177e4 LT |
477 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
478 | tm SP_PSW+1(%r15),0x01 # interrupting from user ?
479 | jz pgm_no_vtime
480 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
481 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
482 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
483 | pgm_no_vtime:
484 | #endif
485 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
9e74a6b8 | 486 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
411788ea | 487 | TRACE_IRQS_OFF
# Dispatch: interruption code low 7 bits index the 128-entry
# pgm_check_table of 8-byte handler pointers; return lands at sysc_return.
25d83cbf | 488 | lgf %r3,__LC_PGM_ILC # load program interruption code
1da177e4 LT |
489 | lghi %r8,0x7f
490 | ngr %r8,%r3
491 | pgm_do_call:
25d83cbf HC |
492 | sll %r8,3
493 | larl %r1,pgm_check_table
494 | lg %r1,0(%r8,%r1) # load address of handler routine
495 | la %r2,SP_PTREGS(%r15) # address of register-save area
1da177e4 | 496 | larl %r14,sysc_return
25d83cbf | 497 | br %r1 # branch to interrupt-handler
1da177e4 LT |
498 | |
499 | # | |
500 | # handle per exception | |
501 | # | |
502 | pgm_per: | |
25d83cbf HC |
503 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on |
504 | jnz pgm_per_std # ok, normal per event from user space | |
1da177e4 | 505 | # ok its one of the special cases, now we need to find out which one |
25d83cbf HC |
506 | clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW |
507 | je pgm_svcper | |
1da177e4 LT |
508 | # no interesting special case, ignore PER event |
509 | lmg %r12,%r15,__LC_SAVE_AREA | |
25d83cbf | 510 | lpswe __LC_PGM_OLD_PSW |
1da177e4 LT |
511 | |
512 | # | |
513 | # Normal per exception | |
514 | # | |
515 | pgm_per_std: | |
63b12246 | 516 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 517 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
1da177e4 LT |
518 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
519 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | |
520 | jz pgm_no_vtime2 | |
521 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | |
522 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
523 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
524 | pgm_no_vtime2: | |
525 | #endif | |
526 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
411788ea | 527 | TRACE_IRQS_OFF |
1da177e4 | 528 | lg %r1,__TI_task(%r9) |
4ba069b8 MG |
529 | tm SP_PSW+1(%r15),0x01 # kernel per event ? |
530 | jz kernel_per | |
1da177e4 LT |
531 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID |
532 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS | |
533 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | |
534 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | |
25d83cbf | 535 | lgf %r3,__LC_PGM_ILC # load program interruption code |
1da177e4 | 536 | lghi %r8,0x7f |
25d83cbf | 537 | ngr %r8,%r3 # clear per-event-bit and ilc |
1da177e4 LT |
538 | je sysc_return |
539 | j pgm_do_call | |
540 | ||
541 | # |
542 | # it was a single stepped SVC that is causing all the trouble
543 | # |
# Single-stepped SVC: enter through the SVC path (build the frame from the
# SVC old PSW), record the PER info and TIF_SINGLE_STEP so the step is
# reported after the syscall, then re-enable interrupts and dispatch the
# syscall at sysc_do_svc.
544 | pgm_svcper: |
63b12246 | 545 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
77fa2245 | 546 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
1da177e4 | 547 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
1da177e4 LT |
548 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
549 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
550 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1da177e4 | 551 | #endif
25d83cbf | 552 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
1da177e4 LT |
553 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
554 | lg %r1,__TI_task(%r9)
555 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
556 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
557 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
558 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
1f194a4c | 559 | TRACE_IRQS_ON
1da177e4 LT |
560 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
561 | j sysc_do_svc
562 | ||
4ba069b8 MG |
563 | # |
564 | # per was called from kernel, must be kprobes
565 | # |
# Returns through sysc_restore (no work-flag recheck) since this never
# goes back to user mode directly.
566 | kernel_per: |
567 | lhi %r0,__LC_PGM_OLD_PSW
568 | sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
569 | la %r2,SP_PTREGS(%r15) # address of register-save area
411788ea | 570 | larl %r14,sysc_restore # load adr. of system ret, no work
4ba069b8 MG |
571 | jg do_single_step # branch to do_single_step
572 | ||
572 | ||
1da177e4 LT |
573 | /* |
574 | * IO interrupt handler routine
575 | */ |
# Async entry: timestamp, save base regs into the second lowcore save area
# (+32, so a nested SVC save is not clobbered), switch to the async stack,
# account time if interrupting user mode, then hand pt_regs to do_IRQ.
25d83cbf | 576 | .globl io_int_handler
1da177e4 LT |
577 | io_int_handler:
578 | STORE_TIMER __LC_ASYNC_ENTER_TIMER
579 | stck __LC_INT_CLOCK
580 | SAVE_ALL_BASE __LC_SAVE_AREA+32
63b12246 | 581 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
77fa2245 | 582 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
1da177e4 LT |
583 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
584 | tm SP_PSW+1(%r15),0x01 # interrupting from user ?
585 | jz io_no_vtime
586 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
587 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
588 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
589 | io_no_vtime:
590 | #endif
591 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
1f194a4c | 592 | TRACE_IRQS_OFF
25d83cbf HC |
593 | la %r2,SP_PTREGS(%r15) # address of register-save area
594 | brasl %r14,do_IRQ # call standard irq handler
1da177e4 | 595 | io_return:
1da177e4 | 596 | tm __TI_flags+7(%r9),_TIF_WORK_INT
25d83cbf | 597 | jnz io_work # there is work to do (signals etc.)
411788ea HC |
598 | io_restore:
599 | #ifdef CONFIG_TRACE_IRQFLAGS
600 | larl %r1,io_restore_trace_psw
601 | lpswe 0(%r1)
602 | io_restore_trace:
603 | TRACE_IRQS_CHECK
523b44cf | 604 | LOCKDEP_SYS_EXIT
411788ea | 605 | #endif
1da177e4 | 606 | io_leave:
25d83cbf | 607 | RESTORE_ALL __LC_RETURN_PSW,0
ae6aa2ea | 608 | io_done:
1da177e4 | 609 |
411788ea HC |
610 | #ifdef CONFIG_TRACE_IRQFLAGS
611 | .align 8
612 | .globl io_restore_trace_psw
613 | io_restore_trace_psw:
614 | .quad 0, io_restore_trace
615 | #endif
616 | ||
616 | ||
2688905e | 617 | # |
0eaeafa1 CB |
618 | # There is work todo, we need to check if we return to userspace, then |
619 | # check, if we are in SIE, if yes leave it | |
2688905e MS |
620 | # |
621 | io_work: | |
622 | tm SP_PSW+1(%r15),0x01 # returning to user ? | |
623 | #ifndef CONFIG_PREEMPT | |
0eaeafa1 CB |
624 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
625 | jnz io_work_user # yes -> no need to check for SIE | |
626 | la %r1, BASED(sie_opcode) # we return to kernel here | |
627 | lg %r2, SP_PSW+8(%r15) | |
628 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | |
629 | jne io_restore # no-> return to kernel | |
630 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | |
631 | aghi %r1, 4 | |
632 | stg %r1, SP_PSW+8(%r15) | |
633 | j io_restore # return to kernel | |
634 | #else | |
2688905e | 635 | jno io_restore # no-> skip resched & signal |
0eaeafa1 | 636 | #endif |
2688905e MS |
637 | #else |
638 | jnz io_work_user # yes -> do resched & signal | |
0eaeafa1 CB |
639 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) |
640 | la %r1, BASED(sie_opcode) | |
641 | lg %r2, SP_PSW+8(%r15) | |
642 | clc 0(2,%r1), 0(%r2) # is current instruction = SIE? | |
643 | jne 0f # no -> leave PSW alone | |
644 | lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE | |
645 | aghi %r1, 4 | |
646 | stg %r1, SP_PSW+8(%r15) | |
647 | 0: | |
648 | #endif | |
2688905e | 649 | # check for preemptive scheduling |
25d83cbf | 650 | icm %r0,15,__TI_precount(%r9) |
2688905e | 651 | jnz io_restore # preemption is disabled |
1da177e4 LT |
652 | # switch to kernel stack |
653 | lg %r1,SP_R15(%r15) | |
654 | aghi %r1,-SP_SIZE | |
655 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | |
25d83cbf | 656 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
1da177e4 LT |
657 | lgr %r15,%r1 |
658 | io_resume_loop: | |
659 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | |
411788ea | 660 | jno io_restore |
b8e7a54c HC |
661 | larl %r14,io_resume_loop |
662 | jg preempt_schedule_irq | |
1da177e4 LT |
663 | #endif |
664 | ||
# io_work_user: returning to user space with work pending — move pt_regs
# onto the kernel stack first, then loop over the work bits.
2688905e | 665 | io_work_user:
1da177e4 LT |
666 | lg %r1,__LC_KERNEL_STACK
667 | aghi %r1,-SP_SIZE
668 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
25d83cbf | 669 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
1da177e4 LT |
670 | lgr %r15,%r1
671 | # |
672 | # One of the work bits is on. Find out which one.
54dfe5dd HC |
673 | # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
674 | # and _TIF_MCCK_PENDING
1da177e4 LT |
675 | # |
676 | io_work_loop:
77fa2245 HC |
677 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
678 | jo io_mcck_pending
1da177e4 LT |
679 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
680 | jo io_reschedule
02a029b3 | 681 | tm __TI_flags+7(%r9),_TIF_SIGPENDING
54dfe5dd | 682 | jnz io_sigpending
753c4dd6 MS |
683 | tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
684 | jnz io_notify_resume
411788ea HC |
685 | j io_restore
686 | io_work_done:
1da177e4 | 687 |
0eaeafa1 CB |
# First two bytes of the SIE instruction, compared against the interrupted
# instruction in io_work above.
688 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
689 | sie_opcode:
690 | .long 0xb2140000
691 | #endif
692 | ||
77fa2245 HC |
693 | # |
694 | # _TIF_MCCK_PENDING is set, call handler
695 | # |
696 | io_mcck_pending:
b771aeac | 697 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
b771aeac | 698 | j io_work_loop
77fa2245 | 699 |
77fa2245 | 699 | |
1da177e4 LT |
700 | # |
701 | # _TIF_NEED_RESCHED is set, call schedule
25d83cbf HC |
702 | # |
# Work handlers below run with interrupts enabled (stosm 0x03) and
# re-disable them (stnsm 0xfc) before rejoining io_work_loop.
703 | io_reschedule: |
411788ea | 704 | TRACE_IRQS_ON
25d83cbf HC |
705 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
706 | brasl %r14,schedule # call scheduler
707 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
411788ea | 708 | TRACE_IRQS_OFF
1da177e4 | 709 | tm __TI_flags+7(%r9),_TIF_WORK_INT
411788ea | 710 | jz io_restore # there is no work to do
1da177e4 LT |
711 | j io_work_loop
712 | ||
713 | # |
02a029b3 | 714 | # _TIF_SIGPENDING or is set, call do_signal
1da177e4 | 715 | # |
25d83cbf | 716 | io_sigpending:
411788ea | 717 | TRACE_IRQS_ON
25d83cbf HC |
718 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
719 | la %r2,SP_PTREGS(%r15) # load pt_regs
1da177e4 | 720 | brasl %r14,do_signal # call do_signal
25d83cbf | 721 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
411788ea | 722 | TRACE_IRQS_OFF
e1c3ad96 | 723 | j io_work_loop
1da177e4 | 724 |
753c4dd6 MS |
725 | # |
726 | # _TIF_NOTIFY_RESUME or is set, call do_notify_resume
727 | # |
728 | io_notify_resume:
729 | TRACE_IRQS_ON
730 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
731 | la %r2,SP_PTREGS(%r15) # load pt_regs
732 | brasl %r14,do_notify_resume # call do_notify_resume
733 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
734 | TRACE_IRQS_OFF
735 | j io_work_loop
736 | ||
736 | ||
1da177e4 LT |
737 | /* |
738 | * External interrupt handler routine
739 | */ |
# Same async-entry pattern as io_int_handler; passes the external
# interruption code to do_extint and joins the common io_return exit path.
25d83cbf | 740 | .globl ext_int_handler
1da177e4 LT |
741 | ext_int_handler:
742 | STORE_TIMER __LC_ASYNC_ENTER_TIMER
743 | stck __LC_INT_CLOCK
744 | SAVE_ALL_BASE __LC_SAVE_AREA+32
63b12246 | 745 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
77fa2245 | 746 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
1da177e4 LT |
747 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
748 | tm SP_PSW+1(%r15),0x01 # interrupting from user ?
749 | jz ext_no_vtime
750 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
751 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
752 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
753 | ext_no_vtime:
754 | #endif
755 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
1f194a4c | 756 | TRACE_IRQS_OFF
25d83cbf HC |
757 | la %r2,SP_PTREGS(%r15) # address of register-save area
758 | llgh %r3,__LC_EXT_INT_CODE # get interruption code
759 | brasl %r14,do_extint
1da177e4 LT |
760 | j io_return
761 | ||
ae6aa2ea MS |
# End of the critical region recognized by cleanup_critical.
762 | __critical_end:
763 | ||
763 | ||
1da177e4 LT |
764 | /*
765 | * Machine check handler routines
766 | *
767 | * mcck_int_handler: revalidates gprs and the cpu timer from the lowcore
768 | * save areas (the machine may have corrupted them), checks the MCCK code
769 | * validity bits, runs cleanup_critical if the old PSW points into the
770 | * kernel critical section, switches to the panic stack unless already on
771 | * it, and calls s390_do_machine_check. On return to user it switches to
772 | * the kernel stack and handles _TIF_MCCK_PENDING before leaving through
773 | * mcck_return via the dedicated __LC_RETURN_MCCK_PSW.
774 | */
25d83cbf | 767 | .globl mcck_int_handler
1da177e4 | 768 | mcck_int_handler:
77fa2245 HC |
769 | la %r1,4095 # revalidate r1
770 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
25d83cbf | 771 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
1da177e4 | 772 | SAVE_ALL_BASE __LC_SAVE_AREA+64
77fa2245 | 773 | la %r12,__LC_MCK_OLD_PSW
25d83cbf | 774 | tm __LC_MCCK_CODE,0x80 # system damage?
77fa2245 | 775 | jo mcck_int_main # yes -> rest of mcck code invalid
1da177e4 | 776 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
63b12246 MS |
# Pick the most recent still-valid timer value as a substitute for the
# async enter timer if the stored cpu timer value is flagged invalid.
777 | la %r14,4095
778 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
779 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
780 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
781 | jo 1f
782 | la %r14,__LC_SYNC_ENTER_TIMER
783 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
784 | jl 0f
785 | la %r14,__LC_ASYNC_ENTER_TIMER
786 | 0: clc 0(8,%r14),__LC_EXIT_TIMER
787 | jl 0f
788 | la %r14,__LC_EXIT_TIMER
789 | 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
790 | jl 0f
791 | la %r14,__LC_LAST_UPDATE_TIMER
792 | 0: spt 0(%r14)
793 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
794 | 1:
1da177e4 | 795 | #endif
63b12246 | 796 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
77fa2245 | 797 | jno mcck_int_main # no -> skip cleanup critical
25d83cbf | 798 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
77fa2245 HC |
799 | jnz mcck_int_main # from user -> load kernel stack
800 | clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
801 | jhe mcck_int_main
25d83cbf | 802 | clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
77fa2245 | 803 | jl mcck_int_main
25d83cbf | 804 | brasl %r14,cleanup_critical
77fa2245 | 805 | mcck_int_main:
25d83cbf | 806 | lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
77fa2245 HC |
807 | slgr %r14,%r15
808 | srag %r14,%r14,PAGE_SHIFT
809 | jz 0f
25d83cbf | 810 | lg %r15,__LC_PANIC_STACK # load panic stack
77fa2245 | 811 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
ae6aa2ea MS |
812 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
813 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
814 | jno mcck_no_vtime # no -> no timer update
63b12246 | 815 | tm SP_PSW+1(%r15),0x01 # interrupting from user ?
ae6aa2ea MS |
816 | jz mcck_no_vtime
817 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
818 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
819 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
820 | mcck_no_vtime:
821 | #endif
77fa2245 HC |
822 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
823 | la %r2,SP_PTREGS(%r15) # load pt_regs
824 | brasl %r14,s390_do_machine_check
25d83cbf | 825 | tm SP_PSW+1(%r15),0x01 # returning to user ?
77fa2245 HC |
826 | jno mcck_return
827 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack
828 | aghi %r1,-SP_SIZE
829 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
830 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
831 | lgr %r15,%r1
832 | stosm __SF_EMPTY(%r15),0x04 # turn dat on
833 | tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
834 | jno mcck_return
1f194a4c | 835 | TRACE_IRQS_OFF
77fa2245 | 836 | brasl %r14,s390_handle_mcck
1f194a4c | 837 | TRACE_IRQS_ON
1da177e4 | 838 | mcck_return:
63b12246 MS |
# Exit via a separate lpswe using __LC_RETURN_MCCK_PSW: a machine check
# can interrupt the normal io/sysc exit path, so it must not share their
# return PSW slot.
839 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
840 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
841 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
842 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING
843 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
844 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
845 | jno 0f
846 | stpt __LC_EXIT_TIMER
847 | 0:
848 | #endif
849 | lpswe __LC_RETURN_MCCK_PSW # back to caller
1da177e4 | 850 |
1da177e4 LT |
851 | /*
852 | * Restart interruption handler, kick starter for additional CPUs
853 | *
854 | * SMP: loads the new cpu's kernel stack pointer, control and access
855 | * registers from the lowcore save areas, restores the clone frame's
856 | * registers, turns DAT on and jumps to start_secondary.
857 | * Non-SMP: loads a disabled-wait PSW (0x000a...) so an unexpected
858 | * restart lets the new CPU stop visibly instead of running wild.
859 | */
84b36a8e | 854 | #ifdef CONFIG_SMP
2bc89b5e | 855 | __CPUINIT
25d83cbf | 856 | .globl restart_int_handler
1da177e4 | 857 | restart_int_handler:
25d83cbf HC |
858 | lg %r15,__LC_SAVE_AREA+120 # load ksp
859 | lghi %r10,__LC_CREGS_SAVE_AREA
860 | lctlg %c0,%c15,0(%r10) # get new ctl regs
861 | lghi %r10,__LC_AREGS_SAVE_AREA
862 | lam %a0,%a15,0(%r10)
863 | lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
864 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
865 | jg start_secondary
84b36a8e | 866 | .previous
1da177e4 LT |
867 | #else
868 | /*
869 | * If we do not run with SMP enabled, let the new CPU crash ...
870 | */
25d83cbf | 871 | .globl restart_int_handler
1da177e4 | 872 | restart_int_handler:
25d83cbf | 873 | basr %r1,0
1da177e4 | 874 | restart_base:
25d83cbf HC |
875 | lpswe restart_crash-restart_base(%r1)
876 | .align 8
1da177e4 | 877 | restart_crash:
25d83cbf | 878 | .long 0x000a0000,0x00000000,0x00000000,0x00000000
1da177e4 LT |
879 | restart_go:
880 | #endif
881 | ||
882 | #ifdef CONFIG_CHECK_STACK | |
883 | /* | |
884 | * The synchronous or the asynchronous stack overflowed. We are dead. | |
885 | * No need to properly save the registers, we are going to panic anyway. | |
886 | * Setup a pt_regs so that show_trace can provide a good call trace. | |
887 | */ | |
888 | stack_overflow: | |
889 | lg %r15,__LC_PANIC_STACK # change to panic stack | |
9514e231 | 890 | aghi %r15,-SP_SIZE |
1da177e4 LT |
891 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack |
892 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | |
893 | la %r1,__LC_SAVE_AREA | |
894 | chi %r12,__LC_SVC_OLD_PSW | |
895 | je 0f | |
896 | chi %r12,__LC_PGM_OLD_PSW | |
897 | je 0f | |
9514e231 | 898 | la %r1,__LC_SAVE_AREA+32 |
25d83cbf | 899 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack |
9e74a6b8 | 900 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK |
25d83cbf HC |
901 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain |
902 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
1da177e4 LT |
903 | jg kernel_stack_overflow |
904 | #endif | |
905 | ||
906 | cleanup_table_system_call: | |
907 | .quad system_call, sysc_do_svc | |
908 | cleanup_table_sysc_return: | |
909 | .quad sysc_return, sysc_leave | |
910 | cleanup_table_sysc_leave: | |
411788ea | 911 | .quad sysc_leave, sysc_done |
1da177e4 | 912 | cleanup_table_sysc_work_loop: |
411788ea | 913 | .quad sysc_work_loop, sysc_work_done |
63b12246 MS |
914 | cleanup_table_io_return: |
915 | .quad io_return, io_leave | |
ae6aa2ea MS |
916 | cleanup_table_io_leave: |
917 | .quad io_leave, io_done | |
918 | cleanup_table_io_work_loop: | |
411788ea | 919 | .quad io_work_loop, io_work_done |
1da177e4 LT |
920 | |
921 | cleanup_critical: | |
922 | clc 8(8,%r12),BASED(cleanup_table_system_call) | |
923 | jl 0f | |
924 | clc 8(8,%r12),BASED(cleanup_table_system_call+8) | |
925 | jl cleanup_system_call | |
926 | 0: | |
927 | clc 8(8,%r12),BASED(cleanup_table_sysc_return) | |
928 | jl 0f | |
929 | clc 8(8,%r12),BASED(cleanup_table_sysc_return+8) | |
930 | jl cleanup_sysc_return | |
931 | 0: | |
932 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave) | |
933 | jl 0f | |
934 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) | |
935 | jl cleanup_sysc_leave | |
936 | 0: | |
937 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) | |
938 | jl 0f | |
939 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) | |
77fa2245 | 940 | jl cleanup_sysc_return |
63b12246 MS |
941 | 0: |
942 | clc 8(8,%r12),BASED(cleanup_table_io_return) | |
943 | jl 0f | |
944 | clc 8(8,%r12),BASED(cleanup_table_io_return+8) | |
945 | jl cleanup_io_return | |
ae6aa2ea MS |
946 | 0: |
947 | clc 8(8,%r12),BASED(cleanup_table_io_leave) | |
948 | jl 0f | |
949 | clc 8(8,%r12),BASED(cleanup_table_io_leave+8) | |
950 | jl cleanup_io_leave | |
951 | 0: | |
952 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop) | |
953 | jl 0f | |
954 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) | |
955 | jl cleanup_io_return | |
1da177e4 LT |
956 | 0: |
957 | br %r14 | |
958 | ||
959 | cleanup_system_call: | |
960 | mvc __LC_RETURN_PSW(16),0(%r12) | |
ae6aa2ea MS |
961 | cghi %r12,__LC_MCK_OLD_PSW |
962 | je 0f | |
963 | la %r12,__LC_SAVE_AREA+32 | |
964 | j 1f | |
965 | 0: la %r12,__LC_SAVE_AREA+64 | |
966 | 1: | |
1da177e4 LT |
967 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
968 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | |
969 | jh 0f | |
970 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
971 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | |
972 | jhe cleanup_vtime | |
973 | #endif | |
974 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | |
975 | jh 0f | |
ae6aa2ea MS |
976 | mvc __LC_SAVE_AREA(32),0(%r12) |
977 | 0: stg %r13,8(%r12) | |
978 | stg %r12,__LC_SAVE_AREA+96 # argh | |
63b12246 | 979 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 980 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
ae6aa2ea MS |
981 | lg %r12,__LC_SAVE_AREA+96 # argh |
982 | stg %r15,24(%r12) | |
1da177e4 LT |
983 | llgh %r7,__LC_SVC_INT_CODE |
984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
985 | cleanup_vtime: | |
986 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | |
987 | jhe cleanup_stime | |
1da177e4 LT |
988 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
989 | cleanup_stime: | |
990 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32) | |
991 | jh cleanup_update | |
992 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
993 | cleanup_update: | |
994 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
1da177e4 LT |
995 | #endif |
996 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | |
997 | la %r12,__LC_RETURN_PSW | |
998 | br %r14 | |
999 | cleanup_system_call_insn: | |
1000 | .quad sysc_saveall | |
1001 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
25d83cbf HC |
1002 | .quad system_call |
1003 | .quad sysc_vtime | |
1004 | .quad sysc_stime | |
1005 | .quad sysc_update | |
1da177e4 LT |
1006 | #endif |
1007 | ||
1008 | cleanup_sysc_return: | |
1009 | mvc __LC_RETURN_PSW(8),0(%r12) | |
1010 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) | |
1011 | la %r12,__LC_RETURN_PSW | |
1012 | br %r14 | |
1013 | ||
1014 | cleanup_sysc_leave: | |
1015 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) | |
ae6aa2ea | 1016 | je 2f |
1da177e4 LT |
1017 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1018 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
1019 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) | |
ae6aa2ea | 1020 | je 2f |
1da177e4 LT |
1021 | #endif |
1022 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | |
ae6aa2ea MS |
1023 | cghi %r12,__LC_MCK_OLD_PSW |
1024 | jne 0f | |
1025 | mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) | |
1026 | j 1f | |
1027 | 0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) | |
1028 | 1: lmg %r0,%r11,SP_R0(%r15) | |
1da177e4 | 1029 | lg %r15,SP_R15(%r15) |
ae6aa2ea | 1030 | 2: la %r12,__LC_RETURN_PSW |
1da177e4 LT |
1031 | br %r14 |
1032 | cleanup_sysc_leave_insn: | |
411788ea | 1033 | .quad sysc_done - 4 |
1da177e4 | 1034 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
411788ea | 1035 | .quad sysc_done - 8 |
1da177e4 | 1036 | #endif |
1da177e4 | 1037 |
ae6aa2ea MS |
# cleanup_io_return: restart the interrupted io exit path at
# io_work_loop (first quad of cleanup_table_io_work_loop).
1038 | cleanup_io_return:
1039 | mvc __LC_RETURN_PSW(8),0(%r12)
1040 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
1041 | la %r12,__LC_RETURN_PSW
1042 | br %r14
1043 | ||
1044 | cleanup_io_leave: | |
1045 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) | |
1046 | je 2f | |
1047 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
1048 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
1049 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) | |
1050 | je 2f | |
1051 | #endif | |
1052 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | |
1053 | cghi %r12,__LC_MCK_OLD_PSW | |
1054 | jne 0f | |
1055 | mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) | |
1056 | j 1f | |
1057 | 0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) | |
1058 | 1: lmg %r0,%r11,SP_R0(%r15) | |
1059 | lg %r15,SP_R15(%r15) | |
1060 | 2: la %r12,__LC_RETURN_PSW | |
1061 | br %r14 | |
1062 | cleanup_io_leave_insn: | |
411788ea | 1063 | .quad io_done - 4 |
ae6aa2ea | 1064 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
411788ea | 1065 | .quad io_done - 8 |
ae6aa2ea | 1066 | #endif |
ae6aa2ea | 1067 |
1da177e4 LT |
1068 | /*
1069 | * Integer constants
1070 | *
1071 | * Literal pool referenced via BASED(): syscall count, pgm/svc shortcut
1072 | * offsets (0x130-0x170), and the bounds of the kernel critical section
1073 | * used by mcck_int_handler / cleanup_critical above.
1074 | */
25d83cbf | 1071 | .align 4
1da177e4 | 1072 | .Lconst:
25d83cbf HC |
1073 | .Lnr_syscalls: .long NR_syscalls
1074 | .L0x0130: .short 0x130
1075 | .L0x0140: .short 0x140
1076 | .L0x0150: .short 0x150
1077 | .L0x0160: .short 0x160
1078 | .L0x0170: .short 0x170
1da177e4 | 1079 | .Lcritical_start:
25d83cbf | 1080 | .quad __critical_start
1da177e4 | 1081 | .Lcritical_end:
25d83cbf | 1082 | .quad __critical_end
1da177e4 | 1083 |
# Syscall tables, generated by expanding syscalls.S twice: once picking
# the 64-bit (esame) entry points, and once more under CONFIG_COMPAT
# picking the 31-bit emulation (emu) entry points. Entries are .long
# (32-bit) values — NOTE(review): presumably the svc dispatch loads and
# extends these; confirm against the system_call path earlier in this file.
25d83cbf | 1084 | .section .rodata, "a"
1da177e4 | 1085 | #define SYSCALL(esa,esame,emu) .long esame
1da177e4 LT |
1086 | sys_call_table:
1087 | #include "syscalls.S"
1088 | #undef SYSCALL
1089 ||
347a8dc3 | 1090 | #ifdef CONFIG_COMPAT
1da177e4 LT |
1091 ||
1092 | #define SYSCALL(esa,esame,emu) .long emu
1da177e4 LT |
1093 | sys_call_table_emu:
1094 | #include "syscalls.S"
1095 | #undef SYSCALL
1096 | #endif