/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

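/* All three masks keep TSO memory ordering, FPU enable, and privileged
 * mode set; they differ only in whether PSTATE_IE (interrupt enable)
 * stays on and, for the pre-sun4v path, whether the alternate-globals
 * register set (PSTATE_AG) is selected.
 */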
		.text
		.align	32
__handle_preemption:
		call	schedule
		 wrpr	%g0, RTRAP_PSTATE, %pstate
		ba,pt	%xcc, __handle_preemption_continue
		 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
		call	fault_in_user_windows
		 wrpr	%g0, RTRAP_PSTATE, %pstate
		ba,pt	%xcc, __handle_preemption_continue
		 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

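/* If the user never enabled the FPU (FPRS_FEF clear in the live %fprs),
 * drop TSTATE_PEF from the saved %tstate in %l1 so the process resumes
 * with the FPU disabled and faults its state back in on next use.
 */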
__handle_userfpu:
		rd	%fprs, %l5
		andcc	%l5, FPRS_FEF, %g0
		sethi	%hi(TSTATE_PEF), %o0
		be,a,pn	%icc, __handle_userfpu_continue
		 andn	%l1, %o0, %l1
		ba,a,pt	%xcc, __handle_userfpu_continue

__handle_signal:
		mov	%l5, %o1
		add	%sp, PTREGS_OFF, %o0
		mov	%l0, %o2
		call	do_notify_resume
		 wrpr	%g0, RTRAP_PSTATE, %pstate
		wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi	%hi(0xf << 20), %l4
		and	%l1, %l4, %l4
		ba,pt	%xcc, __handle_preemption_continue
		 andn	%l1, %l4, %l1

/* When returning from a NMI (%pil==15) interrupt we want to
 * avoid running softirqs, doing IRQ tracing, preempting, etc.
 */
		.globl	rtrap_nmi
rtrap_nmi:	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi	%hi(0xf << 20), %l4
		and	%l1, %l4, %l4
		andn	%l1, %l4, %l1
		srl	%l4, 20, %l4
		ba,pt	%xcc, rtrap_no_irq_enable
		 wrpr	%l4, %pil

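/* Bits 20-23 of the saved %tstate hold the trap-time %pil (placed
 * there at trap entry); the paths below extract it into %l4 and keep
 * the remaining %tstate bits in %l1.
 */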
		.align	64
		.globl	rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
		sethi	%hi(__cpu_data), %l0
		lduw	[%l0 + %lo(__cpu_data)], %l1
#else
		sethi	%hi(__cpu_data), %l0
		or	%l0, %lo(__cpu_data), %l0
		lduw	[%l0 + %g5], %l1
#endif
		cmp	%l1, 0

		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
		ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_xcall:
		sethi	%hi(0xf << 20), %l4
		and	%l1, %l4, %l4
		andn	%l1, %l4, %l1
		srl	%l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
		brnz,pn	%l4, rtrap_no_irq_enable
		 nop
		call	trace_hardirqs_on
		 nop
		/* Do not actually set the %pil here.  We will do that
		 * below after we clear PSTATE_IE in the %pstate register.
		 * If we re-enable interrupts here, we can recurse down
		 * the hardirq stack potentially endlessly, causing a
		 * stack overflow.
		 *
		 * It is tempting to put this test and trace_hardirqs_on
		 * call at the 'rt_continue' label, but that will not work
		 * as that path hits unconditionally and we do not want to
		 * execute this in NMI return paths, for example.
		 */
#endif
rtrap_no_irq_enable:
		andcc	%l1, TSTATE_PRIV, %l3
		bne,pn	%icc, to_kernel
		 nop

		/* We must hold IRQs off and atomically test schedule+signal
		 * state, then hold them off all the way back to userspace.
		 * If we are returning to kernel, none of this matters.  Note
		 * that we are disabling interrupts via PSTATE_IE, not using
		 * %pil.
		 *
		 * If we do not do this, there is a window where we would do
		 * the tests, later the signal/resched event arrives but we do
		 * not process it since we are still in kernel mode.  It would
		 * take until the next local IRQ before the signal/resched
		 * event would be handled.
		 *
		 * This also means that if we have to deal with user
		 * windows, we have to redo all of these sched+signal checks
		 * with IRQs disabled.
		 */
to_user:	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
		wrpr	0, %pil
__handle_preemption_continue:
		ldx	[%g6 + TI_FLAGS], %l0
		sethi	%hi(_TIF_USER_WORK_MASK), %o0
		or	%o0, %lo(_TIF_USER_WORK_MASK), %o0
		andcc	%l0, %o0, %g0
		sethi	%hi(TSTATE_PEF), %o0
		be,pt	%xcc, user_nowork
		 andcc	%l1, %o0, %g0
		andcc	%l0, _TIF_NEED_RESCHED, %g0
		bne,pn	%xcc, __handle_preemption
		 andcc	%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
		bne,pn	%xcc, __handle_signal
		 ldub	[%g6 + TI_WSAVED], %o2
		brnz,pn	%o2, __handle_user_windows
		 nop
		sethi	%hi(TSTATE_PEF), %o0
		andcc	%l1, %o0, %g0

		/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
		bne,pn	%xcc, __handle_userfpu
		 stb	%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

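/* rt_continue is the common exit for both user and kernel returns:
 * reload every register saved in pt_regs and rebuild the trap state
 * (%tstate, %tpc, %tnpc) for the final retry.
 */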
rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
		ldx	[%sp + PTREGS_OFF + PT_V9_G2], %g2

		ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
		ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
		ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
		brz,pt	%l3, 1f
		 mov	%g6, %l2

		/* Must do this before thread reg is clobbered below.  */
		LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
		ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
		ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7

		/* Normal globals are restored, go to trap globals.  */
661:		wrpr	%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
		nop
		.section	.sun4v_2insn_patch, "ax"
		.word	661b
		wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
		SET_GL(1)
		.previous

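/* The .sun4v_*insn_patch sections are applied once at boot: on sun4v
 * (Niagara) chips, which use global register levels instead of the
 * alternate globals of earlier UltraSPARC, the instruction(s) at each
 * 661: label are overwritten with the replacement sequence.
 */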
		mov	%l2, %g6

		ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
		ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1

		ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
		ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
		ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
		ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
		ldx	[%sp + PTREGS_OFF + PT_V9_I6], %i6
		ldx	[%sp + PTREGS_OFF + PT_V9_I7], %i7
		ldx	[%sp + PTREGS_OFF + PT_V9_TPC], %l2
		ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

		ld	[%sp + PTREGS_OFF + PT_V9_Y], %o3
		wr	%o3, %g0, %y
		wrpr	%l4, 0x0, %pil
		wrpr	%g0, 0x1, %tl
		andn	%l1, TSTATE_SYSCALL, %l1
		wrpr	%l1, %g0, %tstate
		wrpr	%l2, %g0, %tpc
		wrpr	%o2, %g0, %tnpc

		brnz,pn	%l3, kern_rtt
		 mov	PRIMARY_CONTEXT, %l7

661:		ldxa	[%l7 + %l7] ASI_DMMU, %l0
		.section	.sun4v_1insn_patch, "ax"
		.word	661b
		ldxa	[%l7 + %l7] ASI_MMU, %l0
		.previous

		sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
		ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
		or	%l0, %l1, %l0

661:		stxa	%l0, [%l7] ASI_DMMU
		.section	.sun4v_1insn_patch, "ax"
		.word	661b
		stxa	%l0, [%l7] ASI_MMU
		.previous

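/* On pre-sun4v chips a store to an MMU register only takes effect
 * after a flush of an instruction address, so flush KERNBASE (which
 * sits in a locked TLB entry) before relying on the new primary
 * context.
 */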
		sethi	%hi(KERNBASE), %l7
		flush	%l7
		rdpr	%wstate, %l1
		rdpr	%otherwin, %l2
		srl	%l1, 3, %l1

		wrpr	%l2, %g0, %canrestore
		wrpr	%l1, %g0, %wstate
		brnz,pt	%l2, user_rtt_restore
		 wrpr	%g0, %g0, %otherwin

		ldx	[%g6 + TI_FLAGS], %g3
		wr	%g0, ASI_AIUP, %asi
		rdpr	%cwp, %g1
		andcc	%g3, _TIF_32BIT, %g0
		sub	%g1, 1, %g1
		bne,pt	%xcc, user_rtt_fill_32bit
		 wrpr	%g1, %cwp
		ba,a,pt	%xcc, user_rtt_fill_64bit

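/* user_rtt_fill_fixup: the fill of the user register window from the
 * stack faulted.  Unwind the window state, switch back to the kernel
 * primary context, record a WINFIXUP fault code and address, and hand
 * the fault to do_sparc64_fault() before retrying the whole rtrap.
 */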
user_rtt_fill_fixup:
		rdpr	%cwp, %g1
		add	%g1, 1, %g1
		wrpr	%g1, 0x0, %cwp

		rdpr	%wstate, %g2
		sll	%g2, 3, %g2
		wrpr	%g2, 0x0, %wstate

		/* We know %canrestore and %otherwin are both zero.  */

		sethi	%hi(sparc64_kern_pri_context), %g2
		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
		mov	PRIMARY_CONTEXT, %g1

661:		stxa	%g2, [%g1] ASI_DMMU
		.section	.sun4v_1insn_patch, "ax"
		.word	661b
		stxa	%g2, [%g1] ASI_MMU
		.previous

		sethi	%hi(KERNBASE), %g1
		flush	%g1

		or	%g4, FAULT_CODE_WINFIXUP, %g4
		stb	%g4, [%g6 + TI_FAULT_CODE]
		stx	%g5, [%g6 + TI_FAULT_ADDR]

		mov	%g6, %l1
		wrpr	%g0, 0x0, %tl

661:		nop
		.section	.sun4v_1insn_patch, "ax"
		.word	661b
		SET_GL(0)
		.previous

		wrpr	%g0, RTRAP_PSTATE, %pstate

		mov	%l1, %g6
		ldx	[%g6 + TI_TASK], %g4
		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
		call	do_sparc64_fault
		 add	%sp, PTREGS_OFF, %o0
		ba,pt	%xcc, rtrap
		 nop

user_rtt_pre_restore:
		add	%g1, 1, %g1
		wrpr	%g1, 0x0, %cwp

user_rtt_restore:
		restore
		rdpr	%canrestore, %g1
		wrpr	%g1, 0x0, %cleanwin
		retry
		nop

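/* Return to kernel mode: if no window can be restored, branch off to
 * refill one from the kernel stack (kern_rtt_fill), otherwise clear
 * the pt_regs magic word and return via restore+retry.
 */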
kern_rtt:	rdpr	%canrestore, %g1
		brz,pn	%g1, kern_rtt_fill
		 nop
kern_rtt_restore:
		stw	%g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
		restore
		retry

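/* With CONFIG_PREEMPT, a return to kernel mode may reschedule, but
 * only if the preempt count is zero, TIF_NEED_RESCHED is set, and the
 * trap-time %pil in %l4 was zero (interrupts were not disabled).
 */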
to_kernel:
#ifdef CONFIG_PREEMPT
		ldsw	[%g6 + TI_PRE_COUNT], %l5
		brnz	%l5, kern_fpucheck
		 ldx	[%g6 + TI_FLAGS], %l5
		andcc	%l5, _TIF_NEED_RESCHED, %g0
		be,pt	%xcc, kern_fpucheck
		 nop
		cmp	%l4, 0
		bne,pn	%xcc, kern_fpucheck
		 sethi	%hi(PREEMPT_ACTIVE), %l6
		stw	%l6, [%g6 + TI_PRE_COUNT]
		call	schedule
		 nop
		ba,pt	%xcc, rtrap
		 stw	%g0, [%g6 + TI_PRE_COUNT]
#endif
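/* Lazy FPU state restore.  TI_FPDEPTH/TI_FPSAVED in thread_info track
 * which halves of the FP register file (FPRS_DL for %f0-%f31, FPRS_DU
 * for %f32-%f63) were saved at this depth; only those halves are
 * block-loaded back, together with the matching %gsr and %fsr.
 */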
kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
		brz,pt	%l5, rt_continue
		 srl	%l5, 1, %o0
		add	%g6, TI_FPSAVED, %l6
		ldub	[%l6 + %o0], %l2
		sub	%l5, 2, %l5

		add	%g6, TI_GSR, %o1
		andcc	%l2, (FPRS_FEF|FPRS_DU), %g0
		be,pt	%icc, 2f
		 and	%l2, FPRS_DL, %l6
		andcc	%l2, FPRS_FEF, %g0
		be,pn	%icc, 5f
		 sll	%o0, 3, %o5
		rd	%fprs, %g1

		wr	%g1, FPRS_FEF, %fprs
		ldx	[%o1 + %o5], %g1
		add	%g6, TI_XFSR, %o1
		sll	%o0, 8, %o2
		add	%g6, TI_FPREGS, %o3
		brz,pn	%l6, 1f
		 add	%g6, TI_FPREGS+0x40, %o4

		membar	#Sync
		ldda	[%o3 + %o2] ASI_BLK_P, %f0
		ldda	[%o4 + %o2] ASI_BLK_P, %f16
		membar	#Sync
1:		andcc	%l2, FPRS_DU, %g0
		be,pn	%icc, 1f
		 wr	%g1, 0, %gsr
		add	%o2, 0x80, %o2
		membar	#Sync
		ldda	[%o3 + %o2] ASI_BLK_P, %f32
		ldda	[%o4 + %o2] ASI_BLK_P, %f48
1:		membar	#Sync
		ldx	[%o1 + %o5], %fsr
2:		stb	%l5, [%g6 + TI_FPDEPTH]
		ba,pt	%xcc, rt_continue
		 nop
5:		wr	%g0, FPRS_FEF, %fprs
		sll	%o0, 8, %o2

		add	%g6, TI_FPREGS+0x80, %o3
		add	%g6, TI_FPREGS+0xc0, %o4
		membar	#Sync
		ldda	[%o3 + %o2] ASI_BLK_P, %f32
		ldda	[%o4 + %o2] ASI_BLK_P, %f48
		membar	#Sync
		wr	%g0, FPRS_DU, %fprs
		ba,pt	%xcc, rt_continue
		 stb	%l5, [%g6 + TI_FPDEPTH]