]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/ia64/kernel/mca_asm.S
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
[mirror_ubuntu-bionic-kernel.git] / arch / ia64 / kernel / mca_asm.S
CommitLineData
fe77efb8
HS
1/*
2 * File: mca_asm.S
3 * Purpose: assembly portion of the IA64 MCA handling
4 *
5 * Mods by cfleck to integrate into kernel build
6 *
7 * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
8 * Added various stop bits to get a clean compile
9 *
10 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
11 * Added code to save INIT handoff state in pt_regs format,
12 * switch to temp kstack, switch modes, jump to C INIT handler
13 *
14 * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
15 * Before entering virtual mode code:
16 * 1. Check for TLB CPU error
17 * 2. Restore current thread pointer to kr6
18 * 3. Move stack ptr 16 bytes to conform to C calling convention
19 *
20 * 2004-11-12 Russ Anderson <rja@sgi.com>
21 * Added per cpu MCA/INIT stack save areas.
22 *
23 * 2005-12-08 Keith Owens <kaos@sgi.com>
24 * Use per cpu MCA/INIT stacks for all data.
25 */
1da177e4
LT
26#include <linux/threads.h>
27
28#include <asm/asmmacro.h>
29#include <asm/pgtable.h>
30#include <asm/processor.h>
31#include <asm/mca_asm.h>
32#include <asm/mca.h>
33
7f613c7d 34#include "entry.h"
1da177e4
LT
35
// Load into `reg` the per-cpu physical address of the MCA/INIT save area:
// first get the physical address of the per-cpu variable ia64_mca_data,
// then dereference it.  Safe to use in physical mode before a stack exists.
36#define GET_IA64_MCA_DATA(reg)						\
37	GET_THIS_PADDR(reg, ia64_mca_data)				\
38	;;								\
39	ld8 reg=[reg]
40
b8d8b883 41 .global ia64_do_tlb_purge
7f613c7d
KO
42 .global ia64_os_mca_dispatch
43 .global ia64_os_init_dispatch_monarch
44 .global ia64_os_init_dispatch_slave
1da177e4
LT
45
46 .text
47 .align 16
48
7f613c7d
KO
49//StartMain////////////////////////////////////////////////////////////////////
50
b8d8b883
AR
51/*
52 * Just the TLB purge part is moved to a separate function
53 * so we can re-use the code for cpu hotplug code as well
54 * Caller should now setup b1, so we can branch once the
55 * tlb flush is complete.
56 */
1da177e4 57
// ia64_do_tlb_purge: purge all translation-cache entries (ptc.e loop driven
// by the PAL-provided ptce base/count/stride values cached in cpu_info) and
// then purge the translation registers covering kernel text/data, PAL code
// and the current kernel stack.  Runs in physical mode with no stack; the
// caller must put the return address in b1 before branching here.
b8d8b883 58ia64_do_tlb_purge:
1da177e4
LT
59#define O(member)	IA64_CPUINFO_##member##_OFFSET
60
61	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
62	;;
63	addl r17=O(PTCE_STRIDE),r2
64	addl r2=O(PTCE_BASE),r2
65	;;
66	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
67	ld4 r19=[r2],4					// r19=ptce_count[0]
68	ld4 r21=[r17],4					// r21=ptce_stride[0]
69	;;
70	ld4 r20=[r2]					// r20=ptce_count[1]
71	ld4 r22=[r17]					// r22=ptce_stride[1]
72	mov r24=0
73	;;
74	adds r20=-1,r20
75	;;
76#undef O
77
	// Outer loop (label 2): r24 counts ptce_count[0] iterations, each
	// stepping the base by ptce_stride[0].  Inner loop (label 3), driven
	// by ar.lc = ptce_count[1]-1, issues ptc.e and steps by ptce_stride[1].
782:
79	cmp.ltu p6,p7=r24,r19
80(p7)	br.cond.dpnt.few 4f
81	mov ar.lc=r20
823:
83	ptc.e r18
84	;;
85	add r18=r22,r18
86	br.cloop.sptk.few 3b
87	;;
88	add r18=r21,r18
89	add r24=1,r24
90	;;
91	br.sptk.few 2b
924:
93	srlz.i 			// srlz.i implies srlz.d
94	;;
95
96	// Now purge addresses formerly mapped by TR registers
97	// 1. Purge ITR&DTR for kernel.
98	movl r16=KERNEL_START
99	mov r18=KERNEL_TR_PAGE_SHIFT<<2
100	;;
101	ptr.i r16, r18
102	ptr.d r16, r18
103	;;
104	srlz.i
105	;;
106	srlz.d
107	;;
1da177e4
LT
108	// 3. Purge ITR for PAL code.
109	GET_THIS_PADDR(r2, ia64_mca_pal_base)
110	;;
111	ld8 r16=[r2]
112	mov r18=IA64_GRANULE_SHIFT<<2
113	;;
114	ptr.i r16,r18
115	;;
116	srlz.i
117	;;
118	// 4. Purge DTR for stack.
119	mov r16=IA64_KR(CURRENT_STACK)
120	;;
121	shl r16=r16,IA64_GRANULE_SHIFT
122	movl r19=PAGE_OFFSET
123	;;
124	add r16=r19,r16
125	mov r18=IA64_GRANULE_SHIFT<<2
126	;;
127	ptr.d r16,r18
128	;;
129	srlz.i
130	;;
b8d8b883
AR
131	// Now branch away to caller.
132	br.sptk.many b1
133	;;
134
134
7f613c7d
KO
135//EndMain//////////////////////////////////////////////////////////////////////
136
137//StartMain////////////////////////////////////////////////////////////////////
b8d8b883 138
// ia64_os_mca_dispatch: SAL-to-OS entry point for a Machine Check Abort,
// entered in physical mode with the SAL handoff state in registers
// (r1, r8-r12, r17, r18 - see the ia64_state_save stub comment).
// Sequence: save handoff state; if the processor state parameter requires
// it, purge and reload the TLB; switch to the per-cpu MCA stack; set the
// kernel registers; enter virtual mode and call the C handler
// ia64_mca_handler; then undo each step and return to SAL through the
// return address saved in r12.
7f613c7d 139ia64_os_mca_dispatch:
7f613c7d
KO
140	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
141	LOAD_PHYSICAL(p0,r2,1f)			// return address
142	mov r19=1				// All MCA events are treated as monarch (for now)
143	br.sptk ia64_state_save			// save the state that is not in minstate
1441:
b8d8b883 145
7f613c7d
KO
146	GET_IA64_MCA_DATA(r2)
147	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
148	;;
d270acbc 149	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
b8d8b883 150	;;
7f613c7d 151	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
b8d8b883
AR
152	;;
	// Bit 60 of the processor state parameter gates the TLB purge/reload
	// below (clear => skip it).  NOTE(review): exact psp bit semantics
	// come from the SAL spec - confirm there, not from this code.
153	tbit.nz p6,p7=r18,60
154(p7)	br.spnt done_tlb_purge_and_reload
155
156	// The following code purges TC and TR entries. Then reload all TC entries.
157	// Purge percpu data TC entries.
158begin_tlb_purge_and_reload:
159	movl r18=ia64_reload_tr;;
160	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
161	mov b1=r18;;
162	br.sptk.many ia64_do_tlb_purge;;
163
164ia64_reload_tr:
1da177e4
LT
165	// Finally reload the TR registers.
166	// 1. Reload DTR/ITR registers for kernel.
167	mov r18=KERNEL_TR_PAGE_SHIFT<<2
168	movl r17=KERNEL_START
169	;;
170	mov cr.itir=r18
171	mov cr.ifa=r17
172	mov r16=IA64_TR_KERNEL
173	mov r19=ip
174	movl r18=PAGE_KERNEL
175	;;
176	dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
177	;;
178	or r18=r17,r18
179	;;
180	itr.i itr[r16]=r18
181	;;
182	itr.d dtr[r16]=r18
183	;;
184	srlz.i
185	srlz.d
186	;;
1da177e4
LT
187	// 3. Reload ITR for PAL code.
188	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
189	;;
190	ld8 r18=[r2]			// load PAL PTE
191	;;
192	GET_THIS_PADDR(r2, ia64_mca_pal_base)
193	;;
194	ld8 r16=[r2]			// load PAL vaddr
195	mov r19=IA64_GRANULE_SHIFT<<2
196	;;
197	mov cr.itir=r19
198	mov cr.ifa=r16
199	mov r20=IA64_TR_PALCODE
200	;;
201	itr.i itr[r20]=r18
202	;;
203	srlz.i
204	;;
205	// 4. Reload DTR for stack.
206	mov r16=IA64_KR(CURRENT_STACK)
207	;;
208	shl r16=r16,IA64_GRANULE_SHIFT
209	movl r19=PAGE_OFFSET
210	;;
211	add r18=r19,r16
212	movl r20=PAGE_KERNEL
213	;;
214	add r16=r20,r16
215	mov r19=IA64_GRANULE_SHIFT<<2
216	;;
217	mov cr.itir=r19
218	mov cr.ifa=r18
219	mov r20=IA64_TR_CURRENT_STACK
220	;;
221	itr.d dtr[r20]=r16
222	;;
223	srlz.d
1da177e4
LT
224
225done_tlb_purge_and_reload:
226
7f613c7d
KO
227	// switch to per cpu MCA stack
228	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
229	LOAD_PHYSICAL(p0,r2,1f)			// return address
230	br.sptk ia64_new_stack
2311:
232
233	// everything saved, now we can set the kernel registers
234	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
235	LOAD_PHYSICAL(p0,r2,1f)			// return address
236	br.sptk ia64_set_kernel_registers
2371:
1da177e4 238
7f613c7d 239	// This must be done in physical mode
1da177e4
LT
240	GET_IA64_MCA_DATA(r2)
241	;;
242	mov r7=r2
1da177e4
LT
243
244	// Enter virtual mode from physical mode
245	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
7f613c7d
KO
246
247	// This code returns to SAL via SOS r2, in general SAL has no unwind
248	// data.  To get a clean termination when backtracing the C MCA/INIT
249	// handler, set a dummy return address of 0 in this routine.  That
250	// requires that ia64_os_mca_virtual_begin be a global function.
251ENTRY(ia64_os_mca_virtual_begin)
252	.prologue
253	.save rp,r0
254	.body
255
256	mov ar.rsc=3				// set eager mode for C handler
257	mov r2=r7				// see GET_IA64_MCA_DATA above
258	;;
1da177e4
LT
259
260	// Call virtual mode handler
7f613c7d
KO
261	alloc r14=ar.pfs,0,0,3,0
262	;;
263	DATA_PA_TO_VA(r2,r7)
264	;;
	// out0/out1/out2: virtual addresses of the pt_regs, switch_stack and
	// sal_os_state areas saved on the per-cpu MCA stack.
265	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
266	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
267	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
268	br.call.sptk.many    b0=ia64_mca_handler
269
1da177e4
LT
270	// Revert back to physical mode before going back to SAL
271	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
272ia64_os_mca_virtual_end:
273
7f613c7d
KO
274END(ia64_os_mca_virtual_begin)
275
276	// switch back to previous stack
277	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
278	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
279	LOAD_PHYSICAL(p0,r2,1f)			// return address
280	br.sptk ia64_old_stack
2811:
282
283	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
284	LOAD_PHYSICAL(p0,r2,1f)			// return address
285	br.sptk ia64_state_restore		// restore the SAL state
2861:
287
288	mov		b0=r12			// SAL_CHECK return address
289
7f613c7d
KO
290	br		b0
291
292//EndMain//////////////////////////////////////////////////////////////////////
293
294//StartMain////////////////////////////////////////////////////////////////////
295
296//
297// SAL to OS entry point for INIT on all processors. This has been defined for
298// registration purposes with SAL as a part of ia64_mca_init. Monarch and
299// slave INIT have identical processing, except for the value of the
300// sos->monarch flag in r19.
301//
302
// SAL-to-OS entry points for INIT events.  Monarch and slave entry differ
// only in r19 (sos->monarch flag: 1 = monarch, 0 = slave); both fall into
// ia64_os_init_dispatch, which mirrors ia64_os_mca_dispatch but uses the
// per-cpu INIT stack and calls the C handler ia64_init_handler.  No TLB
// purge/reload is done on the INIT path.
303ia64_os_init_dispatch_monarch:
304	mov r19=1				// Bow, bow, ye lower middle classes!
305	br.sptk ia64_os_init_dispatch
306
307ia64_os_init_dispatch_slave:
308	mov r19=0				// <igor>yeth, mathter</igor>
309
310ia64_os_init_dispatch:
311
312	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
313	LOAD_PHYSICAL(p0,r2,1f)			// return address
314	br.sptk ia64_state_save			// save the state that is not in minstate
3151:
316
317	// switch to per cpu INIT stack
318	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
319	LOAD_PHYSICAL(p0,r2,1f)			// return address
320	br.sptk ia64_new_stack
3211:
322
323	// everything saved, now we can set the kernel registers
324	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
325	LOAD_PHYSICAL(p0,r2,1f)			// return address
326	br.sptk ia64_set_kernel_registers
3271:
328
329	// This must be done in physical mode
1da177e4
LT
330	GET_IA64_MCA_DATA(r2)
331	;;
7f613c7d
KO
332	mov r7=r2
333
334	// Enter virtual mode from physical mode
335	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
336
337	// This code returns to SAL via SOS r2, in general SAL has no unwind
338	// data.  To get a clean termination when backtracing the C MCA/INIT
339	// handler, set a dummy return address of 0 in this routine.  That
340	// requires that ia64_os_init_virtual_begin be a global function.
341ENTRY(ia64_os_init_virtual_begin)
342	.prologue
343	.save rp,r0
344	.body
345
346	mov ar.rsc=3				// set eager mode for C handler
347	mov r2=r7				// see GET_IA64_MCA_DATA above
1da177e4 348	;;
1da177e4 349
7f613c7d
KO
350	// Call virtual mode handler
351	alloc r14=ar.pfs,0,0,3,0
352	;;
353	DATA_PA_TO_VA(r2,r7)
1da177e4 354	;;
7f613c7d
KO
	// out0/out1/out2: virtual addresses of the pt_regs, switch_stack and
	// sal_os_state areas saved on the per-cpu INIT stack.
355	add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
356	add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
357	add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
358	br.call.sptk.many    b0=ia64_init_handler
1da177e4 359
7f613c7d
KO
360	// Revert back to physical mode before going back to SAL
361	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
362ia64_os_init_virtual_end:
1da177e4 363
7f613c7d
KO
364END(ia64_os_init_virtual_begin)
365
366	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
367	LOAD_PHYSICAL(p0,r2,1f)			// return address
368	br.sptk ia64_state_restore		// restore the SAL state
3691:
1da177e4 370
7f613c7d
KO
371	// switch back to previous stack
372	alloc r14=ar.pfs,0,0,0,0		// remove the INIT handler frame
373	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
374	LOAD_PHYSICAL(p0,r2,1f)			// return address
375	br.sptk ia64_old_stack
3761:
377
378	mov		b0=r12			// SAL_CHECK return address
1da177e4 379	br		b0
7f613c7d 380
1da177e4
LT
381//EndMain//////////////////////////////////////////////////////////////////////
382
7f613c7d
KO
383// common defines for the stubs
384#define ms r4
385#define regs r5
386#define temp1 r2 /* careful, it overlaps with input registers */
387#define temp2 r3 /* careful, it overlaps with input registers */
388#define temp3 r7
389#define temp4 r14
390
1da177e4
LT
391
392//++
393// Name:
7f613c7d 394// ia64_state_save()
1da177e4
LT
395//
396// Stub Description:
397//
7f613c7d
KO
398// Save the state that is not in minstate. This is sensitive to the layout of
399// struct ia64_sal_os_state in mca.h.
400//
401// r2 contains the return address, r3 contains either
402// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
403//
404// The OS to SAL section of struct ia64_sal_os_state is set to a default
405// value of cold boot (MCA) or warm boot (INIT) and return to the same
406// context. ia64_sal_os_state is also used to hold some registers that
407// need to be saved and restored across the stack switches.
408//
409// Most input registers to this stub come from PAL/SAL
410// r1 os gp, physical
411// r8 pal_proc entry point
412// r9 sal_proc entry point
413// r10 sal gp
414// r11 MCA - rendevzous state, INIT - reason code
415// r12 sal return address
416// r17 pal min_state
417// r18 processor state parameter
418// r19 monarch flag, set by the caller of this routine
419//
420// In addition to the SAL to OS state, this routine saves all the
421// registers that appear in struct pt_regs and struct switch_stack,
422// excluding those that are already in the PAL minstate area. This
423// results in a partial pt_regs and switch_stack, the C code copies the
424// remaining registers from PAL minstate to pt_regs and switch_stack. The
425// resulting structures contain all the state of the original process when
426// MCA/INIT occurred.
1da177e4
LT
427//
428//--
429
7f613c7d
KO
// ia64_state_save: see the stub description above.  r2 = return address,
// r3 = MCA or INIT stack offset.  Stores run through two pointers (temp1,
// temp2) that leapfrog each other in 16-byte strides so two st8/stf.spill
// can issue per group; the PT()/SW()/SOS() offset arithmetic on the
// post-increment forms keeps both pointers on the struct layout.
430ia64_state_save:
431	add regs=MCA_SOS_OFFSET, r3
432	add ms=MCA_SOS_OFFSET+8, r3
433	mov b0=r2		// save return address
434	cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
435	;;
436	GET_IA64_MCA_DATA(temp2)
437	;;
438	add temp1=temp2, regs	// struct ia64_sal_os_state on MCA or INIT stack
439	add temp2=temp2, ms	// struct ia64_sal_os_state+8 on MCA or INIT stack
440	;;
441	mov regs=temp1		// save the start of sos
442	st8 [temp1]=r1,16	// os_gp
443	st8 [temp2]=r8,16	// pal_proc
444	;;
445	st8 [temp1]=r9,16	// sal_proc
446	st8 [temp2]=r11,16	// rv_rc
447	mov r11=cr.iipa
448	;;
d270acbc
KO
449	st8 [temp1]=r18		// proc_state_param
450	st8 [temp2]=r19		// monarch
7f613c7d 451	mov r6=IA64_KR(CURRENT)
d270acbc
KO
452	add temp1=SOS(SAL_RA), regs
453	add temp2=SOS(SAL_GP), regs
7f613c7d
KO
454	;;
455	st8 [temp1]=r12,16	// sal_ra
456	st8 [temp2]=r10,16	// sal_gp
457	mov r12=cr.isr
458	;;
459	st8 [temp1]=r17,16	// pal_min_state
460	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
20bb8685
KO
461	mov r6=IA64_KR(CURRENT_STACK)
462	;;
463	st8 [temp1]=r6,16	// prev_IA64_KR_CURRENT_STACK
464	st8 [temp2]=r0,16	// prev_task, starts off as NULL
7f613c7d
KO
465	mov r6=cr.ifa
466	;;
20bb8685
KO
467	st8 [temp1]=r12,16	// cr.isr
468	st8 [temp2]=r6,16	// cr.ifa
7f613c7d
KO
469	mov r12=cr.itir
470	;;
20bb8685
KO
471	st8 [temp1]=r12,16	// cr.itir
472	st8 [temp2]=r11,16	// cr.iipa
7f613c7d
KO
473	mov r12=cr.iim
474	;;
d270acbc 475	st8 [temp1]=r12		// cr.iim
7f613c7d
KO
	// p1/p2 were set from the stack-offset compare above: p1 = MCA event
	// (default os_status cold boot), p2 = INIT event (warm boot).
476(p1)	mov r12=IA64_MCA_COLD_BOOT
477(p2)	mov r12=IA64_INIT_WARM_BOOT
20bb8685 478	mov r6=cr.iha
d270acbc 479	add temp1=SOS(OS_STATUS), regs
7f613c7d 480	;;
d270acbc
KO
481	st8 [temp2]=r6		// cr.iha
482	add temp2=SOS(CONTEXT), regs
20bb8685 483	st8 [temp1]=r12		// os_status, default is cold boot
7f613c7d
KO
484	mov r6=IA64_MCA_SAME_CONTEXT
485	;;
2a792058 486	st8 [temp2]=r6		// context, default is same context
7f613c7d
KO
487
488	// Save the pt_regs data that is not in minstate.  The previous code
489	// left regs at sos.
490	add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
491	;;
492	add temp1=PT(B6), regs
493	mov temp3=b6
494	mov temp4=b7
495	add temp2=PT(B7), regs
496	;;
497	st8 [temp1]=temp3,PT(AR_CSD)-PT(B6)	// save b6
498	st8 [temp2]=temp4,PT(AR_SSD)-PT(B7)	// save b7
499	mov temp3=ar.csd
500	mov temp4=ar.ssd
	// cover allocates a new empty frame so the interrupted frame's
	// state is captured; it must not share a group with anything after it.
501	cover				// must be last in group
1da177e4 502	;;
7f613c7d
KO
503	st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD)	// save ar.csd
504	st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD)	// save ar.ssd
505	mov temp3=ar.unat
506	mov temp4=ar.pfs
507	;;
508	st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT)	// save ar.unat
509	st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS)	// save ar.pfs
510	mov temp3=ar.rnat
511	mov temp4=ar.bspstore
512	;;
513	st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT)	// save ar.rnat
514	st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE)	// save ar.bspstore
515	mov temp3=ar.bsp
516	;;
517	sub temp3=temp3, temp4	// ar.bsp - ar.bspstore
518	mov temp4=ar.fpsr
519	;;
520	shl temp3=temp3,16	// compute ar.rsc to be used for "loadrs"
521	;;
522	st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS)	// save loadrs
523	st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR)	// save ar.fpsr
524	mov temp3=ar.ccv
525	;;
526	st8 [temp1]=temp3,PT(F7)-PT(AR_CCV)	// save ar.ccv
527	stf.spill [temp2]=f6,PT(F8)-PT(F6)
528	;;
529	stf.spill [temp1]=f7,PT(F9)-PT(F7)
530	stf.spill [temp2]=f8,PT(F10)-PT(F8)
531	;;
532	stf.spill [temp1]=f9,PT(F11)-PT(F9)
533	stf.spill [temp2]=f10
534	;;
535	stf.spill [temp1]=f11
536
537	// Save the switch_stack data that is not in minstate nor pt_regs.  The
538	// previous code left regs at pt_regs.
539	add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
540	;;
541	add temp1=SW(F2), regs
542	add temp2=SW(F3), regs
543	;;
544	stf.spill [temp1]=f2,32
545	stf.spill [temp2]=f3,32
546	;;
547	stf.spill [temp1]=f4,32
548	stf.spill [temp2]=f5,32
549	;;
550	stf.spill [temp1]=f12,32
551	stf.spill [temp2]=f13,32
552	;;
553	stf.spill [temp1]=f14,32
554	stf.spill [temp2]=f15,32
555	;;
556	stf.spill [temp1]=f16,32
557	stf.spill [temp2]=f17,32
558	;;
559	stf.spill [temp1]=f18,32
560	stf.spill [temp2]=f19,32
561	;;
562	stf.spill [temp1]=f20,32
563	stf.spill [temp2]=f21,32
564	;;
565	stf.spill [temp1]=f22,32
566	stf.spill [temp2]=f23,32
567	;;
568	stf.spill [temp1]=f24,32
569	stf.spill [temp2]=f25,32
570	;;
571	stf.spill [temp1]=f26,32
572	stf.spill [temp2]=f27,32
573	;;
574	stf.spill [temp1]=f28,32
575	stf.spill [temp2]=f29,32
576	;;
577	stf.spill [temp1]=f30,SW(B2)-SW(F30)
578	stf.spill [temp2]=f31,SW(B3)-SW(F31)
579	mov temp3=b2
580	mov temp4=b3
581	;;
582	st8 [temp1]=temp3,16	// save b2
583	st8 [temp2]=temp4,16	// save b3
584	mov temp3=b4
585	mov temp4=b5
586	;;
587	st8 [temp1]=temp3,SW(AR_LC)-SW(B4)	// save b4
588	st8 [temp2]=temp4	// save b5
589	mov temp3=ar.lc
590	;;
591	st8 [temp1]=temp3	// save ar.lc
592
593	// FIXME: Some proms are incorrectly accessing the minstate area as
594	// cached data.  The C code uses region 6, uncached virtual.  Ensure
595	// that there is no cache data lying around for the first 1K of the
596	// minstate area.
597	// Remove this code in September 2006, that gives platforms a year to
598	// fix their proms and get their customers updated.
599
	// r17 = pal_min_state.  Flush-cache (fc) 32 cache lines of 32 bytes
	// in four unrolled batches of 8, covering 4 * 8 * 32 = 1K.
600	add r1=32*1,r17
601	add r2=32*2,r17
602	add r3=32*3,r17
603	add r4=32*4,r17
604	add r5=32*5,r17
605	add r6=32*6,r17
606	add r7=32*7,r17
607	;;
608	fc r17
609	fc r1
610	fc r2
611	fc r3
612	fc r4
613	fc r5
614	fc r6
615	fc r7
616	add r17=32*8,r17
617	add r1=32*8,r1
618	add r2=32*8,r2
619	add r3=32*8,r3
620	add r4=32*8,r4
621	add r5=32*8,r5
622	add r6=32*8,r6
623	add r7=32*8,r7
624	;;
625	fc r17
626	fc r1
627	fc r2
628	fc r3
629	fc r4
630	fc r5
631	fc r6
632	fc r7
633	add r17=32*8,r17
634	add r1=32*8,r1
635	add r2=32*8,r2
636	add r3=32*8,r3
637	add r4=32*8,r4
638	add r5=32*8,r5
639	add r6=32*8,r6
640	add r7=32*8,r7
641	;;
642	fc r17
643	fc r1
644	fc r2
645	fc r3
646	fc r4
647	fc r5
648	fc r6
649	fc r7
650	add r17=32*8,r17
651	add r1=32*8,r1
652	add r2=32*8,r2
653	add r3=32*8,r3
654	add r4=32*8,r4
655	add r5=32*8,r5
656	add r6=32*8,r6
657	add r7=32*8,r7
658	;;
659	fc r17
660	fc r1
661	fc r2
662	fc r3
663	fc r4
664	fc r5
665	fc r6
666	fc r7
667
668	br.sptk b0
1da177e4
LT
669
670//EndStub//////////////////////////////////////////////////////////////////////
671
672
673//++
674// Name:
7f613c7d 675// ia64_state_restore()
1da177e4
LT
676//
677// Stub Description:
678//
7f613c7d
KO
679// Restore the SAL/OS state. This is sensitive to the layout of struct
680// ia64_sal_os_state in mca.h.
681//
682// r2 contains the return address, r3 contains either
683// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
684//
685// In addition to the SAL to OS state, this routine restores all the
686// registers that appear in struct pt_regs and struct switch_stack,
687// excluding those in the PAL minstate area.
1da177e4
LT
688//
689//--
690
7f613c7d
KO
// ia64_state_restore: see the stub description above.  Exact inverse of
// ia64_state_save: reload switch_stack, then pt_regs, then the SAL-to-OS
// handoff state, leaving r8/r9/r10/r12/r22 holding the values SAL expects
// on return.  Finally re-wire the IA64_TR_CURRENT_STACK translation
// register to the stack being resumed (the C handler may have changed
// prev_IA64_KR_CURRENT/_STACK).
691ia64_state_restore:
692	// Restore the switch_stack data that is not in minstate nor pt_regs.
693	add regs=MCA_SWITCH_STACK_OFFSET, r3
694	mov b0=r2		// save return address
695	;;
696	GET_IA64_MCA_DATA(temp2)
697	;;
698	add regs=temp2, regs
699	;;
700	add temp1=SW(F2), regs
701	add temp2=SW(F3), regs
702	;;
703	ldf.fill f2=[temp1],32
704	ldf.fill f3=[temp2],32
705	;;
706	ldf.fill f4=[temp1],32
707	ldf.fill f5=[temp2],32
708	;;
709	ldf.fill f12=[temp1],32
710	ldf.fill f13=[temp2],32
711	;;
712	ldf.fill f14=[temp1],32
713	ldf.fill f15=[temp2],32
714	;;
715	ldf.fill f16=[temp1],32
716	ldf.fill f17=[temp2],32
717	;;
718	ldf.fill f18=[temp1],32
719	ldf.fill f19=[temp2],32
720	;;
721	ldf.fill f20=[temp1],32
722	ldf.fill f21=[temp2],32
723	;;
724	ldf.fill f22=[temp1],32
725	ldf.fill f23=[temp2],32
726	;;
727	ldf.fill f24=[temp1],32
728	ldf.fill f25=[temp2],32
729	;;
730	ldf.fill f26=[temp1],32
731	ldf.fill f27=[temp2],32
732	;;
733	ldf.fill f28=[temp1],32
734	ldf.fill f29=[temp2],32
735	;;
736	ldf.fill f30=[temp1],SW(B2)-SW(F30)
737	ldf.fill f31=[temp2],SW(B3)-SW(F31)
738	;;
739	ld8 temp3=[temp1],16	// restore b2
740	ld8 temp4=[temp2],16	// restore b3
741	;;
742	mov b2=temp3
743	mov b3=temp4
744	ld8 temp3=[temp1],SW(AR_LC)-SW(B4)	// restore b4
745	ld8 temp4=[temp2]	// restore b5
746	;;
747	mov b4=temp3
748	mov b5=temp4
749	ld8 temp3=[temp1]	// restore ar.lc
750	;;
751	mov ar.lc=temp3
1da177e4 752
7f613c7d
KO
753	// Restore the pt_regs data that is not in minstate.  The previous code
754	// left regs at switch_stack.
755	add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
756	;;
757	add temp1=PT(B6), regs
758	add temp2=PT(B7), regs
759	;;
760	ld8 temp3=[temp1],PT(AR_CSD)-PT(B6)		// restore b6
761	ld8 temp4=[temp2],PT(AR_SSD)-PT(B7)		// restore b7
762	;;
763	mov b6=temp3
764	mov b7=temp4
765	ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD)	// restore ar.csd
766	ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD)	// restore ar.ssd
767	;;
768	mov ar.csd=temp3
769	mov ar.ssd=temp4
770	ld8 temp3=[temp1]				// restore ar.unat
771	add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
772	ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS)	// restore ar.pfs
773	;;
774	mov ar.unat=temp3
775	mov ar.pfs=temp4
776	// ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack.
777	ld8 temp3=[temp1],PT(F6)-PT(AR_CCV)		// restore ar.ccv
778	ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR)		// restore ar.fpsr
779	;;
780	mov ar.ccv=temp3
781	mov ar.fpsr=temp4
782	ldf.fill f6=[temp1],PT(F8)-PT(F6)
783	ldf.fill f7=[temp2],PT(F9)-PT(F7)
784	;;
785	ldf.fill f8=[temp1],PT(F10)-PT(F8)
786	ldf.fill f9=[temp2],PT(F11)-PT(F9)
787	;;
788	ldf.fill f10=[temp1]
789	ldf.fill f11=[temp2]
790
791	// Restore the SAL to OS state.  The previous code left regs at pt_regs.
792	add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
1da177e4 793	;;
d270acbc
KO
794	add temp1=SOS(SAL_RA), regs
795	add temp2=SOS(SAL_GP), regs
7f613c7d
KO
796	;;
797	ld8 r12=[temp1],16	// sal_ra
798	ld8 r9=[temp2],16	// sal_gp
799	;;
20bb8685 800	ld8 r22=[temp1],16	// pal_min_state, virtual
8cab7ccc 801	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
7f613c7d 802	;;
20bb8685
KO
803	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
804	ld8 r20=[temp2],16	// prev_task
805	;;
7f613c7d
KO
806	ld8 temp3=[temp1],16	// cr.isr
807	ld8 temp4=[temp2],16	// cr.ifa
808	;;
809	mov cr.isr=temp3
810	mov cr.ifa=temp4
811	ld8 temp3=[temp1],16	// cr.itir
812	ld8 temp4=[temp2],16	// cr.iipa
813	;;
814	mov cr.itir=temp3
815	mov cr.iipa=temp4
d270acbc
KO
816	ld8 temp3=[temp1]	// cr.iim
817	ld8 temp4=[temp2]	// cr.iha
818	add temp1=SOS(OS_STATUS), regs
819	add temp2=SOS(CONTEXT), regs
7f613c7d
KO
820	;;
821	mov cr.iim=temp3
822	mov cr.iha=temp4
8a4b7b6f 823	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
8cab7ccc 824	mov IA64_KR(CURRENT)=r13
7f613c7d
KO
825	ld8 r8=[temp1]		// os_status
826	ld8 r10=[temp2]		// context
827
20bb8685
KO
828	/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
829	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
830	 * purge any existing CURRENT_STACK mapping and insert the new one.
831	 *
8cab7ccc 832	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
20bb8685
KO
833	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
834	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
835	 * the return to SAL.
836	 */
837
838	mov r15=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
839	;;
840	shl r15=r15,IA64_GRANULE_SHIFT
841	;;
842	dep r15=-1,r15,61,3			// virtual granule
843	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
844	;;
845	ptr.d r15,r18
846	;;
847	srlz.d
848
8cab7ccc 849	extr.u r19=r13,61,3			// r13 = prev_IA64_KR_CURRENT
20bb8685
KO
850	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
851	movl r21=PAGE_KERNEL			// page properties
852	;;
853	mov IA64_KR(CURRENT_STACK)=r16
854	cmp.ne p6,p0=RGN_KERNEL,r19		// new stack is in the kernel region?
855	or r21=r20,r21				// construct PA | page properties
856(p6)	br.spnt 1f				// the dreaded cpu 0 idle task in region 5:(
857	;;
858	mov cr.itir=r18
8cab7ccc 859	mov cr.ifa=r13
20bb8685
KO
860	mov r20=IA64_TR_CURRENT_STACK
861	;;
862	itr.d dtr[r20]=r21
863	;;
864	srlz.d
8651:
866
7f613c7d 867	br.sptk b0
1da177e4
LT
868
869//EndStub//////////////////////////////////////////////////////////////////////
870
871
7f613c7d
KO
872//++
873// Name:
874// ia64_new_stack()
1da177e4 875//
7f613c7d 876// Stub Description:
1da177e4 877//
7f613c7d 878// Switch to the MCA/INIT stack.
1da177e4 879//
7f613c7d
KO
880// r2 contains the return address, r3 contains either
881// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 882//
7f613c7d
KO
883// On entry RBS is still on the original stack, this routine switches RBS
884// to use the MCA/INIT stack.
1da177e4 885//
7f613c7d
KO
886// On entry, sos->pal_min_state is physical, on exit it is virtual.
887//
888//--
1da177e4 889
7f613c7d
KO
// ia64_new_stack: see the stub description above.  Switches the register
// backing store (RBS) onto the MCA/INIT stack and converts the saved
// sos->pal_min_state pointer from physical to region-6 virtual for the
// C code.  r2 = return address, r3 = MCA or INIT stack offset.
890ia64_new_stack:
891	add regs=MCA_PT_REGS_OFFSET, r3
d270acbc 892	add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
7f613c7d
KO
893	mov b0=r2			// save return address
894	GET_IA64_MCA_DATA(temp1)
895	invala
1da177e4 896	;;
7f613c7d
KO
897	add temp2=temp2, temp1		// struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
898	add regs=regs, temp1		// struct pt_regs on MCA or INIT stack
1da177e4 899	;;
7f613c7d
KO
900	// Address of minstate area provided by PAL is physical, uncacheable.
901	// Convert to Linux virtual address in region 6 for C code.
902	ld8 ms=[temp2]			// pal_min_state, physical
1da177e4 903	;;
7f613c7d
KO
904	dep temp1=-1,ms,62,2		// set region 6
905	mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
906	;;
907	st8 [temp2]=temp1		// pal_min_state, virtual
1da177e4 908
7f613c7d 909	add temp4=temp3, regs		// start of bspstore on new stack
1da177e4 910	;;
7f613c7d 911	mov ar.bspstore=temp4		// switch RBS to MCA/INIT stack
1da177e4 912	;;
7f613c7d
KO
913	flushrs				// must be first in group
914	br.sptk b0
915
916//EndStub//////////////////////////////////////////////////////////////////////
917
918
919//++
920// Name:
921// ia64_old_stack()
922//
923// Stub Description:
924//
925// Switch to the old stack.
926//
927// r2 contains the return address, r3 contains either
928// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
929//
930// On entry, pal_min_state is virtual, on exit it is physical.
931//
932// On entry RBS is on the MCA/INIT stack, this routine switches RBS
933// back to the previous stack.
934//
935// The psr is set to all zeroes. SAL return requires either all zeroes or
936// just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
937// code does not perform correctly.
938//
939// The dirty registers at the time of the event were flushed to the
940// MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
941// before reverting to the previous bspstore.
942//--
943
// ia64_old_stack: see the stub description above.  First does an rfi with
// cr.ipsr = 0 and cr.iip = physical address of label 1, dropping back to
// physical mode with psr cleared (all zeroes is a SAL-acceptable psr).
// Then restores loadrs/ar.bspstore/ar.rnat from the saved pt_regs to put
// the RBS back on the pre-MCA/INIT stack.  r2 = return address, r3 = MCA
// or INIT stack offset.
944ia64_old_stack:
945	add regs=MCA_PT_REGS_OFFSET, r3
946	mov b0=r2			// save return address
947	GET_IA64_MCA_DATA(temp2)
948	LOAD_PHYSICAL(p0,temp1,1f)
1da177e4 949	;;
7f613c7d
KO
950	mov cr.ipsr=r0
951	mov cr.ifs=r0
952	mov cr.iip=temp1
1da177e4 953	;;
7f613c7d 954	invala
1da177e4 955	rfi
7f613c7d
KO
9561:
957
958	add regs=regs, temp2		// struct pt_regs on MCA or INIT stack
1da177e4 959	;;
7f613c7d 960	add temp1=PT(LOADRS), regs
1da177e4 961	;;
7f613c7d 962	ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS)	// restore loadrs
1da177e4 963	;;
7f613c7d
KO
964	ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE)	// restore ar.bspstore
965	mov ar.rsc=temp2
966	;;
967	loadrs
968	ld8 temp4=[temp1]		// restore ar.rnat
969	;;
970	mov ar.bspstore=temp3		// back to old stack
971	;;
972	mov ar.rnat=temp4
973	;;
974
975	br.sptk b0
1da177e4 976
7f613c7d 977//EndStub//////////////////////////////////////////////////////////////////////
1da177e4 978
1da177e4 979
7f613c7d
KO
980//++
981// Name:
982// ia64_set_kernel_registers()
1da177e4 983//
7f613c7d
KO
984// Stub Description:
985//
986// Set the registers that are required by the C code in order to run on an
987// MCA/INIT stack.
988//
989// r2 contains the return address, r3 contains either
990// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 991//
7f613c7d
KO
992//--
993
// ia64_set_kernel_registers: see the stub description above.  Points the
// kernel stack pointer (r12), current task pointer (r13, kr6 via
// IA64_KR(CURRENT)) and the IA64_TR_CURRENT_STACK translation register at
// the MCA/INIT stack, and resets ar.fpsr to the kernel default, so the C
// handler can run.  r2 = return address, r3 = MCA or INIT stack offset.
994ia64_set_kernel_registers:
995	add temp3=MCA_SP_OFFSET, r3
7f613c7d
KO
996	mov b0=r2		// save return address
997	GET_IA64_MCA_DATA(temp1)
998	;;
7f613c7d
KO
999	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
1000	add r13=temp1, r3	// set current to start of MCA/INIT stack
20bb8685 1001	add r20=temp1, r3	// physical start of MCA/INIT stack
7f613c7d 1002	;;
7f613c7d
KO
1003	DATA_PA_TO_VA(r12,temp2)
1004	DATA_PA_TO_VA(r13,temp3)
1005	;;
1006	mov IA64_KR(CURRENT)=r13
1007
20bb8685
KO
1008	/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
1009	 * any dependencies on the algorithm in ia64_switch_to(), just purge
1010	 * any existing CURRENT_STACK mapping and insert the new one.
1011	 */
1012
1013	mov r16=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
1014	;;
1015	shl r16=r16,IA64_GRANULE_SHIFT
1016	;;
1017	dep r16=-1,r16,61,3			// virtual granule
1018	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
1019	;;
1020	ptr.d r16,r18
1021	;;
1022	srlz.d
1023
1024	shr.u r16=r20,IA64_GRANULE_SHIFT	// r20 = physical start of MCA/INIT stack
1025	movl r21=PAGE_KERNEL			// page properties
1026	;;
1027	mov IA64_KR(CURRENT_STACK)=r16
1028	or r21=r20,r21				// construct PA | page properties
1029	;;
1030	mov cr.itir=r18
1031	mov cr.ifa=r13
1032	mov r20=IA64_TR_CURRENT_STACK
8f9e1467
RA
1033
1034	movl r17=FPSR_DEFAULT
1035	;;
1036	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
20bb8685
KO
1037	;;
1038	itr.d dtr[r20]=r21
1039	;;
1040	srlz.d
7f613c7d
KO
1041
1042	br.sptk b0
1043
1044//EndStub//////////////////////////////////////////////////////////////////////
1045
1046#undef ms
1047#undef regs
1048#undef temp1
1049#undef temp2
1050#undef temp3
1051#undef temp4
1052
1da177e4 1053
7f613c7d
KO
1054// Support function for mca.c, it is here to avoid using inline asm. Given the
1055// address of an rnat slot, if that address is below the current ar.bspstore
1056// then return the contents of that slot, otherwise return the contents of
1057// ar.rnat.
1058GLOBAL_ENTRY(ia64_get_rnat)
1059	alloc r14=ar.pfs,1,0,0,0
	// ar.rsc=0 stops RSE eager activity so ar.bspstore and ar.rnat can
	// be read consistently; restored to eager mode (3) before returning.
1060	mov ar.rsc=0
1061	;;
1062	mov r14=ar.bspstore
1063	;;
	// If the requested rnat slot (in0) is below ar.bspstore it has been
	// written to memory: load it.  Otherwise its bits are still in ar.rnat.
1064	cmp.lt p6,p7=in0,r14
1065	;;
1066(p6)	ld8 r8=[in0]
1067(p7)	mov r8=ar.rnat
1068	mov ar.rsc=3
1069	br.ret.sptk.many rp
1070END(ia64_get_rnat)