]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/ia64/kernel/mca_asm.S
[IA64] Make PCI Express support selectable
[mirror_ubuntu-bionic-kernel.git] / arch / ia64 / kernel / mca_asm.S
CommitLineData
1da177e4
LT
1//
2// assembly portion of the IA64 MCA handling
3//
4// Mods by cfleck to integrate into kernel build
5// 00/03/15 davidm Added various stop bits to get a clean compile
6//
7// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8// kstack, switch modes, jump to C INIT handler
9//
10// 02/01/04 J.Hall <jenna.s.hall@intel.com>
11// Before entering virtual mode code:
12// 1. Check for TLB CPU error
13// 2. Restore current thread pointer to kr6
14// 3. Move stack ptr 16 bytes to conform to C calling convention
15//
16// 04/11/12 Russ Anderson <rja@sgi.com>
17// Added per cpu MCA/INIT stack save areas.
18//
7f613c7d
KO
19// 12/08/05 Keith Owens <kaos@sgi.com>
20// Use per cpu MCA/INIT stacks for all data.
21//
1da177e4
LT
22#include <linux/config.h>
23#include <linux/threads.h>
24
25#include <asm/asmmacro.h>
26#include <asm/pgtable.h>
27#include <asm/processor.h>
28#include <asm/mca_asm.h>
29#include <asm/mca.h>
30
7f613c7d 31#include "entry.h"
1da177e4
LT
32
33#define GET_IA64_MCA_DATA(reg) \
34 GET_THIS_PADDR(reg, ia64_mca_data) \
35 ;; \
36 ld8 reg=[reg]
37
b8d8b883 38 .global ia64_do_tlb_purge
7f613c7d
KO
39 .global ia64_os_mca_dispatch
40 .global ia64_os_init_dispatch_monarch
41 .global ia64_os_init_dispatch_slave
1da177e4
LT
42
43 .text
44 .align 16
45
7f613c7d
KO
46//StartMain////////////////////////////////////////////////////////////////////
47
b8d8b883
AR
48/*
49 * Just the TLB purge part is moved to a separate function
50 * so we can re-use the code for cpu hotplug code as well
51 * Caller should now setup b1, so we can branch once the
52 * tlb flush is complete.
53 */
1da177e4 54
b8d8b883 55ia64_do_tlb_purge:
1da177e4
LT
56#define O(member) IA64_CPUINFO_##member##_OFFSET
57
// Flush the entire TC using the PAL-provided purge parameters
// (ptce_base / ptce_count[2] / ptce_stride[2]) read from this CPU's
// cpu_info, then purge the four TR mappings (kernel, percpu, PAL,
// current stack). Runs in physical mode; caller must have set b1 to
// the (physical) continuation address before branching here.
58 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
59 ;;
60 addl r17=O(PTCE_STRIDE),r2
61 addl r2=O(PTCE_BASE),r2
62 ;;
63 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
64 ld4 r19=[r2],4 // r19=ptce_count[0]
65 ld4 r21=[r17],4 // r21=ptce_stride[0]
66 ;;
67 ld4 r20=[r2] // r20=ptce_count[1]
68 ld4 r22=[r17] // r22=ptce_stride[1]
69 mov r24=0
70 ;;
71 adds r20=-1,r20 // inner loop runs ptce_count[1] times via ar.lc
72 ;;
73#undef O
74
752:
// Outer loop: r24 = outer iteration, 0..ptce_count[0]-1.
// Inner loop (3:) issues ptc.e over ptce_count[1] addresses
// ptce_stride[1] apart; outer loop advances by ptce_stride[0].
76 cmp.ltu p6,p7=r24,r19
77(p7) br.cond.dpnt.few 4f
78 mov ar.lc=r20
793:
80 ptc.e r18
81 ;;
82 add r18=r22,r18
83 br.cloop.sptk.few 3b
84 ;;
85 add r18=r21,r18
86 add r24=1,r24
87 ;;
88 br.sptk.few 2b
894:
90 srlz.i // srlz.i implies srlz.d
91 ;;
92
93 // Now purge addresses formerly mapped by TR registers
94 // 1. Purge ITR&DTR for kernel.
95 movl r16=KERNEL_START
96 mov r18=KERNEL_TR_PAGE_SHIFT<<2 // page size goes in bits 2..7 of ptr operand
97 ;;
98 ptr.i r16, r18
99 ptr.d r16, r18
100 ;;
101 srlz.i
102 ;;
103 srlz.d
104 ;;
105 // 2. Purge DTR for PERCPU data.
106 movl r16=PERCPU_ADDR
107 mov r18=PERCPU_PAGE_SHIFT<<2
108 ;;
109 ptr.d r16,r18
110 ;;
111 srlz.d
112 ;;
113 // 3. Purge ITR for PAL code.
114 GET_THIS_PADDR(r2, ia64_mca_pal_base)
115 ;;
116 ld8 r16=[r2]
117 mov r18=IA64_GRANULE_SHIFT<<2
118 ;;
119 ptr.i r16,r18
120 ;;
121 srlz.i
122 ;;
123 // 4. Purge DTR for stack.
124 mov r16=IA64_KR(CURRENT_STACK) // granule number of current kernel stack
125 ;;
126 shl r16=r16,IA64_GRANULE_SHIFT
127 movl r19=PAGE_OFFSET
128 ;;
129 add r16=r19,r16 // virtual address of current stack granule
130 mov r18=IA64_GRANULE_SHIFT<<2
131 ;;
132 ptr.d r16,r18
133 ;;
134 srlz.i
135 ;;
b8d8b883
AR
136 // Now branch away to caller.
137 br.sptk.many b1
138 ;;
139
7f613c7d
KO
140//EndMain//////////////////////////////////////////////////////////////////////
141
142//StartMain////////////////////////////////////////////////////////////////////
b8d8b883 143
// SAL-to-OS MCA entry point. Runs in physical mode with state handed
// off by SAL/PAL (see ia64_state_save header for the register
// contract). Serializes MCA handling on ia64_mca_serialize, saves
// state, optionally purges/reloads the TLB on a TC/TR error, switches
// to the per-cpu MCA stack, calls the C handler in virtual mode, then
// unwinds everything and returns to SAL via the sal_ra in r12.
7f613c7d 144ia64_os_mca_dispatch:
b8d8b883
AR
145 // Serialize all MCA processing
146 mov r3=1;;
147 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
148ia64_os_mca_spin:
// Spin on an xchg4-based lock; released near the end with st4.rel.
7f613c7d 149 xchg4 r4=[r2],r3;;
b8d8b883
AR
150 cmp.ne p6,p0=r4,r0
151(p6) br ia64_os_mca_spin
152
7f613c7d
KO
153 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
154 LOAD_PHYSICAL(p0,r2,1f) // return address
155 mov r19=1 // All MCA events are treated as monarch (for now)
156 br.sptk ia64_state_save // save the state that is not in minstate
1571:
b8d8b883 158
7f613c7d
KO
159 GET_IA64_MCA_DATA(r2)
160 // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
161 ;;
162 add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
b8d8b883 163 ;;
7f613c7d 164 ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK.
b8d8b883
AR
165 ;;
// Bit 60 of the processor state parameter flags a TLB check; only
// then is the full purge+reload below required.
166 tbit.nz p6,p7=r18,60
167(p7) br.spnt done_tlb_purge_and_reload
168
169 // The following code purges TC and TR entries. Then reload all TC entries.
170 // Purge percpu data TC entries.
171begin_tlb_purge_and_reload:
// ia64_do_tlb_purge returns via b1, so point b1 at ia64_reload_tr
// (physical address) before branching to it.
172 movl r18=ia64_reload_tr;;
173 LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
174 mov b1=r18;;
175 br.sptk.many ia64_do_tlb_purge;;
176
177ia64_reload_tr:
1da177e4
LT
178 // Finally reload the TR registers.
179 // 1. Reload DTR/ITR registers for kernel.
180 mov r18=KERNEL_TR_PAGE_SHIFT<<2
181 movl r17=KERNEL_START
182 ;;
183 mov cr.itir=r18
184 mov cr.ifa=r17
185 mov r16=IA64_TR_KERNEL
186 mov r19=ip
187 movl r18=PAGE_KERNEL
188 ;;
// Build the PTE from the page-aligned physical address of this code
// (ip with the low KERNEL_TR_PAGE_SHIFT bits cleared) plus the
// PAGE_KERNEL attribute bits.
189 dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
190 ;;
191 or r18=r17,r18
192 ;;
193 itr.i itr[r16]=r18
194 ;;
195 itr.d dtr[r16]=r18
196 ;;
197 srlz.i
198 srlz.d
199 ;;
200 // 2. Reload DTR register for PERCPU data.
201 GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
202 ;;
203 movl r16=PERCPU_ADDR // vaddr
204 movl r18=PERCPU_PAGE_SHIFT<<2
205 ;;
206 mov cr.itir=r18
207 mov cr.ifa=r16
208 ;;
209 ld8 r18=[r2] // load per-CPU PTE
210 mov r16=IA64_TR_PERCPU_DATA;
211 ;;
212 itr.d dtr[r16]=r18
213 ;;
214 srlz.d
215 ;;
216 // 3. Reload ITR for PAL code.
217 GET_THIS_PADDR(r2, ia64_mca_pal_pte)
218 ;;
219 ld8 r18=[r2] // load PAL PTE
220 ;;
221 GET_THIS_PADDR(r2, ia64_mca_pal_base)
222 ;;
223 ld8 r16=[r2] // load PAL vaddr
224 mov r19=IA64_GRANULE_SHIFT<<2
225 ;;
226 mov cr.itir=r19
227 mov cr.ifa=r16
228 mov r20=IA64_TR_PALCODE
229 ;;
230 itr.i itr[r20]=r18
231 ;;
232 srlz.i
233 ;;
234 // 4. Reload DTR for stack.
235 mov r16=IA64_KR(CURRENT_STACK)
236 ;;
237 shl r16=r16,IA64_GRANULE_SHIFT
238 movl r19=PAGE_OFFSET
239 ;;
240 add r18=r19,r16 // r18 = virtual address of stack granule
241 movl r20=PAGE_KERNEL
242 ;;
243 add r16=r20,r16 // r16 = PTE (physical addr | attributes)
244 mov r19=IA64_GRANULE_SHIFT<<2
245 ;;
246 mov cr.itir=r19
247 mov cr.ifa=r18
248 mov r20=IA64_TR_CURRENT_STACK
249 ;;
250 itr.d dtr[r20]=r16
251 ;;
252 srlz.d
1da177e4
LT
253
254done_tlb_purge_and_reload:
255
7f613c7d
KO
256 // switch to per cpu MCA stack
257 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
258 LOAD_PHYSICAL(p0,r2,1f) // return address
259 br.sptk ia64_new_stack
2601:
261
262 // everything saved, now we can set the kernel registers
263 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
264 LOAD_PHYSICAL(p0,r2,1f) // return address
265 br.sptk ia64_set_kernel_registers
2661:
1da177e4 267
7f613c7d 268 // This must be done in physical mode
1da177e4
LT
269 GET_IA64_MCA_DATA(r2)
270 ;;
7f613c7d 271 mov r7=r2 // stash physical mca_data addr across the mode switch
1da177e4
LT
272
273 // Enter virtual mode from physical mode
274 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
7f613c7d
KO
275
276 // This code returns to SAL via SOS r2, in general SAL has no unwind
277 // data. To get a clean termination when backtracing the C MCA/INIT
278 // handler, set a dummy return address of 0 in this routine. That
279 // requires that ia64_os_mca_virtual_begin be a global function.
280ENTRY(ia64_os_mca_virtual_begin)
281 .prologue
282 .save rp,r0
283 .body
284
285 mov ar.rsc=3 // set eager mode for C handler
286 mov r2=r7 // see GET_IA64_MCA_DATA above
287 ;;
1da177e4
LT
288
289 // Call virtual mode handler
// ia64_mca_handler(pt_regs *, switch_stack *, sal_os_state *) with
// all three pointers into this CPU's MCA stack area (region-7
// virtual addresses after DATA_PA_TO_VA).
7f613c7d
KO
290 alloc r14=ar.pfs,0,0,3,0
291 ;;
292 DATA_PA_TO_VA(r2,r7)
293 ;;
294 add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
295 add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
296 add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
297 br.call.sptk.many b0=ia64_mca_handler
298
1da177e4
LT
299 // Revert back to physical mode before going back to SAL
300 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
301ia64_os_mca_virtual_end:
302
7f613c7d
KO
303END(ia64_os_mca_virtual_begin)
304
305 // switch back to previous stack
306 alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame
307 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
308 LOAD_PHYSICAL(p0,r2,1f) // return address
309 br.sptk ia64_old_stack
3101:
311
312 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
313 LOAD_PHYSICAL(p0,r2,1f) // return address
314 br.sptk ia64_state_restore // restore the SAL state
3151:
316
317 mov b0=r12 // SAL_CHECK return address
318
319 // release lock
320 LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
321 st4.rel [r3]=r0
322
323 br b0
324
325//EndMain//////////////////////////////////////////////////////////////////////
326
327//StartMain////////////////////////////////////////////////////////////////////
328
329//
330// SAL to OS entry point for INIT on all processors. This has been defined for
331// registration purposes with SAL as a part of ia64_mca_init. Monarch and
332// slave INIT have identical processing, except for the value of the
333// sos->monarch flag in r19.
334//
335
// SAL-to-OS INIT entry points (monarch and slave). Both fall into the
// common ia64_os_init_dispatch path; the only difference is the
// sos->monarch flag carried in r19. Structure mirrors the MCA path:
// save state, switch to the per-cpu INIT stack, call the C handler in
// virtual mode, restore, and return to SAL via r12.
336ia64_os_init_dispatch_monarch:
337 mov r19=1 // Bow, bow, ye lower middle classes!
338 br.sptk ia64_os_init_dispatch
339
340ia64_os_init_dispatch_slave:
341 mov r19=0 // <igor>yeth, mathter</igor>
342
343ia64_os_init_dispatch:
344
345 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
346 LOAD_PHYSICAL(p0,r2,1f) // return address
347 br.sptk ia64_state_save // save the state that is not in minstate
3481:
349
350 // switch to per cpu INIT stack
351 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
352 LOAD_PHYSICAL(p0,r2,1f) // return address
353 br.sptk ia64_new_stack
3541:
355
356 // everything saved, now we can set the kernel registers
357 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
358 LOAD_PHYSICAL(p0,r2,1f) // return address
359 br.sptk ia64_set_kernel_registers
3601:
361
362 // This must be done in physical mode
1da177e4
LT
363 GET_IA64_MCA_DATA(r2)
364 ;;
7f613c7d
KO
365 mov r7=r2 // stash physical mca_data addr across the mode switch
366
367 // Enter virtual mode from physical mode
368 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
369
370 // This code returns to SAL via SOS r2, in general SAL has no unwind
371 // data. To get a clean termination when backtracing the C MCA/INIT
372 // handler, set a dummy return address of 0 in this routine. That
373 // requires that ia64_os_init_virtual_begin be a global function.
374ENTRY(ia64_os_init_virtual_begin)
375 .prologue
376 .save rp,r0
377 .body
378
379 mov ar.rsc=3 // set eager mode for C handler
380 mov r2=r7 // see GET_IA64_MCA_DATA above
1da177e4 381 ;;
1da177e4 382
7f613c7d
KO
383 // Call virtual mode handler
// ia64_init_handler(pt_regs *, switch_stack *, sal_os_state *), all
// pointing into this CPU's INIT stack area.
384 alloc r14=ar.pfs,0,0,3,0
385 ;;
386 DATA_PA_TO_VA(r2,r7)
1da177e4 387 ;;
7f613c7d
KO
388 add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
389 add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
390 add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
391 br.call.sptk.many b0=ia64_init_handler
1da177e4 392
7f613c7d
KO
393 // Revert back to physical mode before going back to SAL
394 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
395ia64_os_init_virtual_end:
1da177e4 396
7f613c7d
KO
397END(ia64_os_init_virtual_begin)
398
// NOTE(review): unlike the MCA path, state_restore runs before
// old_stack here; there is also no serialization lock to release.
399 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
400 LOAD_PHYSICAL(p0,r2,1f) // return address
401 br.sptk ia64_state_restore // restore the SAL state
4021:
1da177e4 403
7f613c7d
KO
404 // switch back to previous stack
405 alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame
406 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
407 LOAD_PHYSICAL(p0,r2,1f) // return address
408 br.sptk ia64_old_stack
4091:
410
411 mov b0=r12 // SAL_CHECK return address
1da177e4 412 br b0
7f613c7d 413
1da177e4
LT
414//EndMain//////////////////////////////////////////////////////////////////////
415
7f613c7d
KO
416// common defines for the stubs
417#define ms r4
418#define regs r5
419#define temp1 r2 /* careful, it overlaps with input registers */
420#define temp2 r3 /* careful, it overlaps with input registers */
421#define temp3 r7
422#define temp4 r14
423
1da177e4
LT
424
425//++
426// Name:
7f613c7d 427// ia64_state_save()
1da177e4
LT
428//
429// Stub Description:
430//
7f613c7d
KO
431// Save the state that is not in minstate. This is sensitive to the layout of
432// struct ia64_sal_os_state in mca.h.
433//
434// r2 contains the return address, r3 contains either
435// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
436//
437// The OS to SAL section of struct ia64_sal_os_state is set to a default
438// value of cold boot (MCA) or warm boot (INIT) and return to the same
439// context. ia64_sal_os_state is also used to hold some registers that
440// need to be saved and restored across the stack switches.
441//
442// Most input registers to this stub come from PAL/SAL
443// r1 os gp, physical
444// r8 pal_proc entry point
445// r9 sal_proc entry point
446// r10 sal gp
447// r11 MCA - rendevzous state, INIT - reason code
448// r12 sal return address
449// r17 pal min_state
450// r18 processor state parameter
451// r19 monarch flag, set by the caller of this routine
452//
453// In addition to the SAL to OS state, this routine saves all the
454// registers that appear in struct pt_regs and struct switch_stack,
455// excluding those that are already in the PAL minstate area. This
456// results in a partial pt_regs and switch_stack, the C code copies the
457// remaining registers from PAL minstate to pt_regs and switch_stack. The
458// resulting structures contain all the state of the original process when
459// MCA/INIT occurred.
1da177e4
LT
460//
461//--
462
7f613c7d
KO
// See the //++ header above for the full register contract. Summary:
// b0 is taken from r2 (return address); r3 selects MCA vs INIT stack;
// p1/p2 record MCA vs INIT for the os_status default. Stores use two
// interleaved pointers (temp1 = sos+0, temp2 = sos+8, both
// post-incremented by 16) so consecutive 8-byte fields alternate
// between the two streams — field order must match struct
// ia64_sal_os_state in mca.h exactly.
463ia64_state_save:
464 add regs=MCA_SOS_OFFSET, r3
465 add ms=MCA_SOS_OFFSET+8, r3
466 mov b0=r2 // save return address
467 cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
468 ;;
469 GET_IA64_MCA_DATA(temp2)
470 ;;
471 add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack
472 add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack
473 ;;
474 mov regs=temp1 // save the start of sos
475 st8 [temp1]=r1,16 // os_gp
476 st8 [temp2]=r8,16 // pal_proc
477 ;;
478 st8 [temp1]=r9,16 // sal_proc
479 st8 [temp2]=r11,16 // rv_rc
480 mov r11=cr.iipa
481 ;;
482 st8 [temp1]=r18,16 // proc_state_param
483 st8 [temp2]=r19,16 // monarch
484 mov r6=IA64_KR(CURRENT)
485 ;;
486 st8 [temp1]=r12,16 // sal_ra
487 st8 [temp2]=r10,16 // sal_gp
488 mov r12=cr.isr
489 ;;
490 st8 [temp1]=r17,16 // pal_min_state
491 st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
20bb8685
KO
492 mov r6=IA64_KR(CURRENT_STACK)
493 ;;
494 st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK
495 st8 [temp2]=r0,16 // prev_task, starts off as NULL
7f613c7d
KO
496 mov r6=cr.ifa
497 ;;
20bb8685
KO
498 st8 [temp1]=r12,16 // cr.isr
499 st8 [temp2]=r6,16 // cr.ifa
7f613c7d
KO
500 mov r12=cr.itir
501 ;;
20bb8685
KO
502 st8 [temp1]=r12,16 // cr.itir
503 st8 [temp2]=r11,16 // cr.iipa
7f613c7d
KO
504 mov r12=cr.iim
505 ;;
20bb8685 506 st8 [temp1]=r12,16 // cr.iim
7f613c7d
KO
507(p1) mov r12=IA64_MCA_COLD_BOOT
508(p2) mov r12=IA64_INIT_WARM_BOOT
20bb8685 509 mov r6=cr.iha
7f613c7d 510 ;;
20bb8685
KO
511 st8 [temp2]=r6,16 // cr.iha
512 st8 [temp1]=r12 // os_status, default is cold boot
7f613c7d
KO
513 mov r6=IA64_MCA_SAME_CONTEXT
514 ;;
2a792058 515 st8 [temp2]=r6 // context, default is same context
7f613c7d
KO
516
517 // Save the pt_regs data that is not in minstate. The previous code
518 // left regs at sos.
519 add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
520 ;;
521 add temp1=PT(B6), regs
522 mov temp3=b6
523 mov temp4=b7
524 add temp2=PT(B7), regs
525 ;;
526 st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6
527 st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7
528 mov temp3=ar.csd
529 mov temp4=ar.ssd
// cover allocates a new empty frame so the interrupted frame is
// pushed into the dirty partition before ar.rnat/ar.bsp are read.
530 cover // must be last in group
1da177e4 531 ;;
7f613c7d
KO
532 st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd
533 st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd
534 mov temp3=ar.unat
535 mov temp4=ar.pfs
536 ;;
537 st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat
538 st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs
539 mov temp3=ar.rnat
540 mov temp4=ar.bspstore
541 ;;
542 st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat
543 st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore
544 mov temp3=ar.bsp
545 ;;
546 sub temp3=temp3, temp4 // ar.bsp - ar.bspstore
547 mov temp4=ar.fpsr
548 ;;
// loadrs field lives in bits 16..29 of ar.rsc, hence the shift.
549 shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs"
550 ;;
551 st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs
552 st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr
553 mov temp3=ar.ccv
554 ;;
555 st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv
556 stf.spill [temp2]=f6,PT(F8)-PT(F6)
557 ;;
558 stf.spill [temp1]=f7,PT(F9)-PT(F7)
559 stf.spill [temp2]=f8,PT(F10)-PT(F8)
560 ;;
561 stf.spill [temp1]=f9,PT(F11)-PT(F9)
562 stf.spill [temp2]=f10
563 ;;
564 stf.spill [temp1]=f11
565
566 // Save the switch_stack data that is not in minstate nor pt_regs. The
567 // previous code left regs at pt_regs.
568 add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
569 ;;
570 add temp1=SW(F2), regs
571 add temp2=SW(F3), regs
572 ;;
573 stf.spill [temp1]=f2,32
574 stf.spill [temp2]=f3,32
575 ;;
576 stf.spill [temp1]=f4,32
577 stf.spill [temp2]=f5,32
578 ;;
579 stf.spill [temp1]=f12,32
580 stf.spill [temp2]=f13,32
581 ;;
582 stf.spill [temp1]=f14,32
583 stf.spill [temp2]=f15,32
584 ;;
585 stf.spill [temp1]=f16,32
586 stf.spill [temp2]=f17,32
587 ;;
588 stf.spill [temp1]=f18,32
589 stf.spill [temp2]=f19,32
590 ;;
591 stf.spill [temp1]=f20,32
592 stf.spill [temp2]=f21,32
593 ;;
594 stf.spill [temp1]=f22,32
595 stf.spill [temp2]=f23,32
596 ;;
597 stf.spill [temp1]=f24,32
598 stf.spill [temp2]=f25,32
599 ;;
600 stf.spill [temp1]=f26,32
601 stf.spill [temp2]=f27,32
602 ;;
603 stf.spill [temp1]=f28,32
604 stf.spill [temp2]=f29,32
605 ;;
606 stf.spill [temp1]=f30,SW(B2)-SW(F30)
607 stf.spill [temp2]=f31,SW(B3)-SW(F31)
608 mov temp3=b2
609 mov temp4=b3
610 ;;
611 st8 [temp1]=temp3,16 // save b2
612 st8 [temp2]=temp4,16 // save b3
613 mov temp3=b4
614 mov temp4=b5
615 ;;
616 st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4
617 st8 [temp2]=temp4 // save b5
618 mov temp3=ar.lc
619 ;;
620 st8 [temp1]=temp3 // save ar.lc
621
622 // FIXME: Some proms are incorrectly accessing the minstate area as
623 // cached data. The C code uses region 6, uncached virtual. Ensure
624 // that there is no cache data lying around for the first 1K of the
625 // minstate area.
626 // Remove this code in September 2006, that gives platforms a year to
627 // fix their proms and get their customers updated.
628
// r17 = pal_min_state (still live from entry). Eight fc's per stop
// group, four groups of 8*32 = 256 bytes each => 1K flushed total.
629 add r1=32*1,r17
630 add r2=32*2,r17
631 add r3=32*3,r17
632 add r4=32*4,r17
633 add r5=32*5,r17
634 add r6=32*6,r17
635 add r7=32*7,r17
636 ;;
637 fc r17
638 fc r1
639 fc r2
640 fc r3
641 fc r4
642 fc r5
643 fc r6
644 fc r7
645 add r17=32*8,r17
646 add r1=32*8,r1
647 add r2=32*8,r2
648 add r3=32*8,r3
649 add r4=32*8,r4
650 add r5=32*8,r5
651 add r6=32*8,r6
652 add r7=32*8,r7
653 ;;
654 fc r17
655 fc r1
656 fc r2
657 fc r3
658 fc r4
659 fc r5
660 fc r6
661 fc r7
662 add r17=32*8,r17
663 add r1=32*8,r1
664 add r2=32*8,r2
665 add r3=32*8,r3
666 add r4=32*8,r4
667 add r5=32*8,r5
668 add r6=32*8,r6
669 add r7=32*8,r7
670 ;;
671 fc r17
672 fc r1
673 fc r2
674 fc r3
675 fc r4
676 fc r5
677 fc r6
678 fc r7
679 add r17=32*8,r17
680 add r1=32*8,r1
681 add r2=32*8,r2
682 add r3=32*8,r3
683 add r4=32*8,r4
684 add r5=32*8,r5
685 add r6=32*8,r6
686 add r7=32*8,r7
687 ;;
688 fc r17
689 fc r1
690 fc r2
691 fc r3
692 fc r4
693 fc r5
694 fc r6
695 fc r7
696
697 br.sptk b0
1da177e4
LT
698
699//EndStub//////////////////////////////////////////////////////////////////////
700
701
702//++
703// Name:
7f613c7d 704// ia64_state_restore()
1da177e4
LT
705//
706// Stub Description:
707//
7f613c7d
KO
708// Restore the SAL/OS state. This is sensitive to the layout of struct
709// ia64_sal_os_state in mca.h.
710//
711// r2 contains the return address, r3 contains either
712// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
713//
714// In addition to the SAL to OS state, this routine restores all the
715// registers that appear in struct pt_regs and struct switch_stack,
716// excluding those in the PAL minstate area.
1da177e4
LT
717//
718//--
719
7f613c7d
KO
// Inverse of ia64_state_save; see the //++ header above. Restores
// switch_stack, then pt_regs, then the SAL-to-OS state, leaving
// r8/r9/r10/r12/r22 loaded for the return to SAL, and finally rewires
// IA64_TR_CURRENT_STACK for the stack being resumed (which the C code
// may have changed, e.g. on a task switch).
720ia64_state_restore:
721 // Restore the switch_stack data that is not in minstate nor pt_regs.
722 add regs=MCA_SWITCH_STACK_OFFSET, r3
723 mov b0=r2 // save return address
724 ;;
725 GET_IA64_MCA_DATA(temp2)
726 ;;
727 add regs=temp2, regs
728 ;;
729 add temp1=SW(F2), regs
730 add temp2=SW(F3), regs
731 ;;
732 ldf.fill f2=[temp1],32
733 ldf.fill f3=[temp2],32
734 ;;
735 ldf.fill f4=[temp1],32
736 ldf.fill f5=[temp2],32
737 ;;
738 ldf.fill f12=[temp1],32
739 ldf.fill f13=[temp2],32
740 ;;
741 ldf.fill f14=[temp1],32
742 ldf.fill f15=[temp2],32
743 ;;
744 ldf.fill f16=[temp1],32
745 ldf.fill f17=[temp2],32
746 ;;
747 ldf.fill f18=[temp1],32
748 ldf.fill f19=[temp2],32
749 ;;
750 ldf.fill f20=[temp1],32
751 ldf.fill f21=[temp2],32
752 ;;
753 ldf.fill f22=[temp1],32
754 ldf.fill f23=[temp2],32
755 ;;
756 ldf.fill f24=[temp1],32
757 ldf.fill f25=[temp2],32
758 ;;
759 ldf.fill f26=[temp1],32
760 ldf.fill f27=[temp2],32
761 ;;
762 ldf.fill f28=[temp1],32
763 ldf.fill f29=[temp2],32
764 ;;
765 ldf.fill f30=[temp1],SW(B2)-SW(F30)
766 ldf.fill f31=[temp2],SW(B3)-SW(F31)
767 ;;
768 ld8 temp3=[temp1],16 // restore b2
769 ld8 temp4=[temp2],16 // restore b3
770 ;;
771 mov b2=temp3
772 mov b3=temp4
773 ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4
774 ld8 temp4=[temp2] // restore b5
775 ;;
776 mov b4=temp3
777 mov b5=temp4
778 ld8 temp3=[temp1] // restore ar.lc
779 ;;
780 mov ar.lc=temp3
1da177e4 781
7f613c7d
KO
782 // Restore the pt_regs data that is not in minstate. The previous code
783 // left regs at switch_stack.
784 add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
785 ;;
786 add temp1=PT(B6), regs
787 add temp2=PT(B7), regs
788 ;;
789 ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6
790 ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7
791 ;;
792 mov b6=temp3
793 mov b7=temp4
794 ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
795 ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
796 ;;
797 mov ar.csd=temp3
798 mov ar.ssd=temp4
799 ld8 temp3=[temp1] // restore ar.unat
800 add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
801 ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
802 ;;
803 mov ar.unat=temp3
804 mov ar.pfs=temp4
805 // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack.
806 ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
807 ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
808 ;;
809 mov ar.ccv=temp3
810 mov ar.fpsr=temp4
811 ldf.fill f6=[temp1],PT(F8)-PT(F6)
812 ldf.fill f7=[temp2],PT(F9)-PT(F7)
813 ;;
814 ldf.fill f8=[temp1],PT(F10)-PT(F8)
815 ldf.fill f9=[temp2],PT(F11)-PT(F9)
816 ;;
817 ldf.fill f10=[temp1]
818 ldf.fill f11=[temp2]
819
820 // Restore the SAL to OS state. The previous code left regs at pt_regs.
821 add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
1da177e4 822 ;;
7f613c7d
KO
823 add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
824 add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
825 ;;
826 ld8 r12=[temp1],16 // sal_ra
827 ld8 r9=[temp2],16 // sal_gp
828 ;;
20bb8685 829 ld8 r22=[temp1],16 // pal_min_state, virtual
8cab7ccc 830 ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
7f613c7d 831 ;;
20bb8685
KO
832 ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
833 ld8 r20=[temp2],16 // prev_task
834 ;;
7f613c7d
KO
835 ld8 temp3=[temp1],16 // cr.isr
836 ld8 temp4=[temp2],16 // cr.ifa
837 ;;
838 mov cr.isr=temp3
839 mov cr.ifa=temp4
840 ld8 temp3=[temp1],16 // cr.itir
841 ld8 temp4=[temp2],16 // cr.iipa
842 ;;
843 mov cr.itir=temp3
844 mov cr.iipa=temp4
845 ld8 temp3=[temp1],16 // cr.iim
846 ld8 temp4=[temp2],16 // cr.iha
847 ;;
848 mov cr.iim=temp3
849 mov cr.iha=temp4
// Clear the region-6 bit to give SAL the physical, uncached
// pal_min_state address it handed us.
8a4b7b6f 850 dep r22=0,r22,62,1 // pal_min_state, physical, uncached
8cab7ccc 851 mov IA64_KR(CURRENT)=r13
7f613c7d
KO
852 ld8 r8=[temp1] // os_status
853 ld8 r10=[temp2] // context
854
20bb8685
KO
855 /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
856 * avoid any dependencies on the algorithm in ia64_switch_to(), just
857 * purge any existing CURRENT_STACK mapping and insert the new one.
858 *
8cab7ccc 859 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
20bb8685
KO
860 * prev_IA64_KR_CURRENT, these values may have been changed by the C
861 * code. Do not use r8, r9, r10, r22, they contain values ready for
862 * the return to SAL.
863 */
864
865 mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
866 ;;
867 shl r15=r15,IA64_GRANULE_SHIFT
868 ;;
869 dep r15=-1,r15,61,3 // virtual granule
870 mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
871 ;;
872 ptr.d r15,r18
873 ;;
874 srlz.d
875
8cab7ccc 876 extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
20bb8685
KO
877 shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
878 movl r21=PAGE_KERNEL // page properties
879 ;;
880 mov IA64_KR(CURRENT_STACK)=r16
881 cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
882 or r21=r20,r21 // construct PA | page properties
883(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
884 ;;
885 mov cr.itir=r18
8cab7ccc 886 mov cr.ifa=r13
20bb8685
KO
887 mov r20=IA64_TR_CURRENT_STACK
888 ;;
889 itr.d dtr[r20]=r21
890 ;;
891 srlz.d
8921:
893
7f613c7d 894 br.sptk b0
1da177e4
LT
895
896//EndStub//////////////////////////////////////////////////////////////////////
897
898
7f613c7d
KO
899//++
900// Name:
901// ia64_new_stack()
1da177e4 902//
7f613c7d 903// Stub Description:
1da177e4 904//
7f613c7d 905// Switch to the MCA/INIT stack.
1da177e4 906//
7f613c7d
KO
907// r2 contains the return address, r3 contains either
908// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 909//
7f613c7d
KO
910// On entry RBS is still on the original stack, this routine switches RBS
911// to use the MCA/INIT stack.
1da177e4 912//
7f613c7d
KO
913// On entry, sos->pal_min_state is physical, on exit it is virtual.
914//
915//--
1da177e4 916
7f613c7d
KO
// See the //++ header above: switch the RSE backing store onto the
// MCA/INIT stack selected by r3, converting sos->pal_min_state from
// physical to region-6 virtual on the way. Return via b0 (from r2).
917ia64_new_stack:
918 add regs=MCA_PT_REGS_OFFSET, r3
919 add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
920 mov b0=r2 // save return address
921 GET_IA64_MCA_DATA(temp1)
922 invala // discard stale ALAT entries before switching RBS
1da177e4 923 ;;
7f613c7d
KO
924 add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
925 add regs=regs, temp1 // struct pt_regs on MCA or INIT stack
1da177e4 926 ;;
7f613c7d
KO
927 // Address of minstate area provided by PAL is physical, uncacheable.
928 // Convert to Linux virtual address in region 6 for C code.
929 ld8 ms=[temp2] // pal_min_state, physical
1da177e4 930 ;;
7f613c7d
KO
931 dep temp1=-1,ms,62,2 // set region 6
932 mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
933 ;;
934 st8 [temp2]=temp1 // pal_min_state, virtual
1da177e4 935
7f613c7d 936 add temp4=temp3, regs // start of bspstore on new stack
1da177e4 937 ;;
7f613c7d 938 mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack
1da177e4 939 ;;
7f613c7d
KO
940 flushrs // must be first in group
941 br.sptk b0
942
943//EndStub//////////////////////////////////////////////////////////////////////
944
945
946//++
947// Name:
948// ia64_old_stack()
949//
950// Stub Description:
951//
952// Switch to the old stack.
953//
954// r2 contains the return address, r3 contains either
955// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
956//
957// On entry, pal_min_state is virtual, on exit it is physical.
958//
959// On entry RBS is on the MCA/INIT stack, this routine switches RBS
960// back to the previous stack.
961//
962// The psr is set to all zeroes. SAL return requires either all zeroes or
963// just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
964// code does not perform correctly.
965//
966// The dirty registers at the time of the event were flushed to the
967// MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
968// before reverting to the previous bspstore.
969//--
970
// See the //++ header above: leave the MCA/INIT stack and revert the
// RSE to the interrupted context's backing store, using the loadrs /
// ar.bspstore / ar.rnat values that ia64_state_save recorded in
// pt_regs. The rfi with psr=0 drops to physical mode with interrupts
// off before the RSE is touched.
971ia64_old_stack:
972 add regs=MCA_PT_REGS_OFFSET, r3
973 mov b0=r2 // save return address
974 GET_IA64_MCA_DATA(temp2)
975 LOAD_PHYSICAL(p0,temp1,1f)
1da177e4 976 ;;
7f613c7d
KO
977 mov cr.ipsr=r0
978 mov cr.ifs=r0
979 mov cr.iip=temp1
1da177e4 980 ;;
7f613c7d 981 invala
1da177e4 982 rfi // resume at 1: with psr all zeroes
7f613c7d
KO
9831:
984
985 add regs=regs, temp2 // struct pt_regs on MCA or INIT stack
1da177e4 986 ;;
7f613c7d 987 add temp1=PT(LOADRS), regs
1da177e4 988 ;;
7f613c7d 989 ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs
1da177e4 990 ;;
7f613c7d
KO
991 ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore
992 mov ar.rsc=temp2 // loadrs amount lives in ar.rsc bits 16..29
993 ;;
994 loadrs // reload the dirty registers saved at MCA/INIT time
995 ld8 temp4=[temp1] // restore ar.rnat
996 ;;
997 mov ar.bspstore=temp3 // back to old stack
998 ;;
999 mov ar.rnat=temp4
1000 ;;
1001
1002 br.sptk b0
1da177e4 1003
7f613c7d 1004//EndStub//////////////////////////////////////////////////////////////////////
1da177e4 1005
1da177e4 1006
7f613c7d
KO
1007//++
1008// Name:
1009// ia64_set_kernel_registers()
1da177e4 1010//
7f613c7d
KO
1011// Stub Description:
1012//
1013// Set the registers that are required by the C code in order to run on an
1014// MCA/INIT stack.
1015//
1016// r2 contains the return address, r3 contains either
1017// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 1018//
7f613c7d
KO
1019//--
1020
// See the //++ header above: set up gp (r1), sp (r12), current (r13)
// and the CURRENT kernel register for running C code on the MCA/INIT
// stack selected by r3, then rewire IA64_TR_CURRENT_STACK to map that
// stack. Return via b0 (from r2).
1021ia64_set_kernel_registers:
1022 add temp3=MCA_SP_OFFSET, r3
1023 add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
1024 mov b0=r2 // save return address
1025 GET_IA64_MCA_DATA(temp1)
1026 ;;
1027 add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
1028 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
1029 add r13=temp1, r3 // set current to start of MCA/INIT stack
20bb8685 1030 add r20=temp1, r3 // physical start of MCA/INIT stack
7f613c7d
KO
1031 ;;
1032 ld8 r1=[temp4] // OS GP from SAL OS state
1033 ;;
// gp/sp/current must be region-7 virtual addresses for the C code.
1034 DATA_PA_TO_VA(r1,temp1)
1035 DATA_PA_TO_VA(r12,temp2)
1036 DATA_PA_TO_VA(r13,temp3)
1037 ;;
1038 mov IA64_KR(CURRENT)=r13
1039
20bb8685
KO
1040 /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid
1041 * any dependencies on the algorithm in ia64_switch_to(), just purge
1042 * any existing CURRENT_STACK mapping and insert the new one.
1043 */
1044
1045 mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
1046 ;;
1047 shl r16=r16,IA64_GRANULE_SHIFT
1048 ;;
1049 dep r16=-1,r16,61,3 // virtual granule
1050 mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
1051 ;;
1052 ptr.d r16,r18
1053 ;;
1054 srlz.d
1055
1056 shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack
1057 movl r21=PAGE_KERNEL // page properties
1058 ;;
1059 mov IA64_KR(CURRENT_STACK)=r16
1060 or r21=r20,r21 // construct PA | page properties
1061 ;;
1062 mov cr.itir=r18
1063 mov cr.ifa=r13 // translation keyed on the new (virtual) current
1064 mov r20=IA64_TR_CURRENT_STACK
1065 ;;
1066 itr.d dtr[r20]=r21
1067 ;;
1068 srlz.d
7f613c7d
KO
1069
1070 br.sptk b0
1071
1072//EndStub//////////////////////////////////////////////////////////////////////
1073
1074#undef ms
1075#undef regs
1076#undef temp1
1077#undef temp2
1078#undef temp3
1079#undef temp4
1080
1da177e4 1081
7f613c7d
KO
1082// Support function for mca.c, it is here to avoid using inline asm. Given the
1083// address of an rnat slot, if that address is below the current ar.bspstore
1084// then return the contents of that slot, otherwise return the contents of
1085// ar.rnat.
// u64 ia64_get_rnat(u64 *slot)
// In:  in0 = address of an RNAT collection slot in a backing store.
// Out: r8  = the RNAT bits for that slot: read from memory if the
//       slot is below the current ar.bspstore (already flushed),
//       otherwise taken from the live ar.rnat register.
// The RSE is stopped (ar.rsc=0) while ar.bspstore/ar.rnat are read,
// then set to mode 3 — NOTE(review): assumes callers run with eager
// mode 3; confirm against the mca.c call sites.
1086GLOBAL_ENTRY(ia64_get_rnat)
1087 alloc r14=ar.pfs,1,0,0,0
1088 mov ar.rsc=0
1089 ;;
1090 mov r14=ar.bspstore
1091 ;;
1092 cmp.lt p6,p7=in0,r14
1093 ;;
1094(p6) ld8 r8=[in0]
1095(p7) mov r8=ar.rnat
1096 mov ar.rsc=3
1097 br.ret.sptk.many rp
1098END(ia64_get_rnat)