/* break.S: Break interrupt handling (kept separate from entry.S)
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/spr-regs.h>

#include <asm/errno.h>

#
# the break handler has its own stack
#
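# the 8kB slab below holds the break handler's scratch stack (growing down from
# __break_stack_tos), followed by REG__DEBUG_XTRA bytes of extra debug state and
# the USER_CONTEXT_SIZE-byte register save area at __break_user_context
#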
        .section .bss.stack
        .globl __break_user_context
        .balign 8192
__break_stack:
        .space (8192 - (USER_CONTEXT_SIZE + REG__DEBUG_XTRA)) & ~7
__break_stack_tos:
        .space REG__DEBUG_XTRA
__break_user_context:
        .space USER_CONTEXT_SIZE

#
# miscellaneous variables
#
        .section .bss
#ifdef CONFIG_MMU
        .globl __break_tlb_miss_real_return_info
__break_tlb_miss_real_return_info:
        .balign 8
        .space 2*4 /* saved PCSR, PSR for TLB-miss handler fixup */
#endif

__break_trace_through_exceptions:
        .space 4

#define CS2_ECS1 0xe1200000
#define CS2_USERLED 0x4

        .macro LEDS val,reg
#       sethi.p %hi(CS2_ECS1+CS2_USERLED),gr30
#       setlo %lo(CS2_ECS1+CS2_USERLED),gr30
#       setlos #~\val,\reg
#       st \reg,@(gr30,gr0)
#       setlos #0x5555,\reg
#       sethi.p %hi(0xffc00100),gr30
#       setlo %lo(0xffc00100),gr30
#       sth \reg,@(gr30,gr0)
#       membar
        .endm
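# the macro body above is commented out, so LEDS currently expands to nothing;
# if re-enabled it writes the complement of \val to the CS2 user LED register
# (and a marker halfword to 0xffc00100) as a breadcrumb of how far break
# handling got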

###############################################################################
#
# entry point for Break Exceptions/Interrupts
#
###############################################################################
        .text
        .balign 4
        .globl __entry_break
__entry_break:
#ifdef CONFIG_MMU
        movgs gr31,scr3
#endif
        LEDS 0x1001,gr31

        sethi.p %hi(__break_user_context),gr31
        setlo %lo(__break_user_context),gr31

        stdi gr2,@(gr31,#REG_GR(2))
        movsg ccr,gr3
        sti gr3,@(gr31,#REG_CCR)

        # catch the return from a TLB-miss handler that had single-step disabled
        # traps will be enabled, so we have to do this now
#ifdef CONFIG_MMU
        movsg bpcsr,gr3
        sethi.p %hi(__break_tlb_miss_return_breaks_here),gr2
        setlo %lo(__break_tlb_miss_return_breaks_here),gr2
        subcc gr2,gr3,gr0,icc0
        beq icc0,#2,__break_return_singlestep_tlbmiss
#endif

        # determine whether we have stepped through into an exception
        # - we need to take special action to suspend h/w single stepping if we've done
        #   that, so that the gdbstub doesn't get bogged down endlessly stepping through
        #   external interrupt handling
        movsg bpsr,gr3
        andicc gr3,#BPSR_BET,gr0,icc0
        bne icc0,#2,__break_maybe_userspace /* jump if PSR.ET was 1 */

        LEDS 0x1003,gr2

        movsg brr,gr3
        andicc gr3,#BRR_ST,gr0,icc0
        andicc.p gr3,#BRR_SB,gr0,icc1
        bne icc0,#2,__break_step /* jump if single-step caused break */
        beq icc1,#2,__break_continue /* jump if BREAK didn't cause break */

        LEDS 0x1007,gr2

        # handle special breaks
        movsg bpcsr,gr3

        sethi.p %hi(__entry_return_singlestep_breaks_here),gr2
        setlo %lo(__entry_return_singlestep_breaks_here),gr2
        subcc gr2,gr3,gr0,icc0
        beq icc0,#2,__break_return_singlestep

        bra __break_continue


###############################################################################
#
# handle BREAK instruction in kernel-mode exception epilogue
#
###############################################################################
__break_return_singlestep:
        LEDS 0x100f,gr2

        # special break insn requests single-stepping to be turned back on
        #               HERE            RETT
        #       PSR.ET  0               0
        #       PSR.PS  old PSR.S       ?
        #       PSR.S   1               1
        #       BPSR.ET 0               1       (can't have caused orig excep otherwise)
        #       BPSR.BS 1               old PSR.S
        movsg dcr,gr2
        sethi.p %hi(DCR_SE),gr3
        setlo %lo(DCR_SE),gr3
        or gr2,gr3,gr2
        movgs gr2,dcr

        movsg psr,gr2
        andi gr2,#PSR_PS,gr2
        slli gr2,#11,gr2 /* PSR.PS -> BPSR.BS */
        ori gr2,#BPSR_BET,gr2 /* 1 -> BPSR.BET */
        movgs gr2,bpsr
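        # (PSR.PS is bit 1 and BPSR.BS is bit 12, hence the shift by 11;
        #  BPSR.BET is bit 0 and can simply be ORed in)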

        # return to the invoker of the original kernel exception
        movsg pcsr,gr2
        movgs gr2,bpcsr

        LEDS 0x101f,gr2

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        lddi.p @(gr31,#REG_GR(2)),gr2
        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

###############################################################################
#
# handle BREAK instruction in TLB-miss handler return path
#
###############################################################################
#ifdef CONFIG_MMU
__break_return_singlestep_tlbmiss:
        LEDS 0x1100,gr2

        sethi.p %hi(__break_tlb_miss_real_return_info),gr3
        setlo %lo(__break_tlb_miss_real_return_info),gr3
        lddi @(gr3,#0),gr2
        movgs gr2,pcsr
        movgs gr3,psr

        bra __break_return_singlestep
#endif


###############################################################################
#
# handle single stepping into an exception prologue from kernel mode
# - we try and catch it whilst it is still in the main vector table
# - if we catch it there, we have to jump to the fixup handler
# - there is a fixup table that has a pointer for every 16b slot in the trap
#   table
#
###############################################################################
__break_step:
        LEDS 0x2003,gr2

        # external interrupts seem to escape from the trap table before single
        # step catches up with them
        movsg bpcsr,gr2
        sethi.p %hi(__entry_kernel_external_interrupt),gr3
        setlo %lo(__entry_kernel_external_interrupt),gr3
        subcc.p gr2,gr3,gr0,icc0
        sethi %hi(__entry_uspace_external_interrupt),gr3
        setlo.p %lo(__entry_uspace_external_interrupt),gr3
        beq icc0,#2,__break_step_kernel_external_interrupt
        subcc.p gr2,gr3,gr0,icc0
        sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3
        setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3
        beq icc0,#2,__break_step_uspace_external_interrupt
        subcc.p gr2,gr3,gr0,icc0
        sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3
        setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3
        beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled
        subcc gr2,gr3,gr0,icc0
        beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable

        LEDS 0x2007,gr2

        # the two main vector tables are adjacent on one 8Kb slab
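        # (ANDing BPCSR with 0xffffe000 clears the low 13 bits, i.e. rounds it
        #  down to the base of that 8kB slab for comparison with __trap_tables)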
        movsg bpcsr,gr2
        setlos #0xffffe000,gr3
        and gr2,gr3,gr2
        sethi.p %hi(__trap_tables),gr3
        setlo %lo(__trap_tables),gr3
        subcc gr2,gr3,gr0,icc0
        bne icc0,#2,__break_continue

        LEDS 0x200f,gr2

        # skip workaround if so requested by GDB
        sethi.p %hi(__break_trace_through_exceptions),gr3
        setlo %lo(__break_trace_through_exceptions),gr3
        ld @(gr3,gr0),gr3
        subcc gr3,gr0,gr0,icc0
        bne icc0,#0,__break_continue

        LEDS 0x201f,gr2

        # access the fixup table - there's a 1:1 mapping between the slots in the trap tables and
        # the slots in the trap fixup tables allowing us to simply divide the offset into the
        # former by 4 to access the latter
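        # (each trap table slot is 16 bytes and each fixup table entry is a
        #  4-byte pointer, so the slot at offset 16*N corresponds to the pointer
        #  at offset 4*N - hence offset/4, aligned down to a multiple of 4)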
        sethi.p %hi(__trap_tables),gr3
        setlo %lo(__trap_tables),gr3
        movsg bpcsr,gr2
        sub gr2,gr3,gr2
        srli.p gr2,#2,gr2

        sethi %hi(__trap_fixup_tables),gr3
        setlo.p %lo(__trap_fixup_tables),gr3
        andi gr2,#~3,gr2
        ld @(gr2,gr3),gr2
        jmpil @(gr2,#0)

        # step through an internal exception from kernel mode
        .globl __break_step_kernel_softprog_interrupt
__break_step_kernel_softprog_interrupt:
        sethi.p %hi(__entry_kernel_softprog_interrupt_reentry),gr3
        setlo %lo(__entry_kernel_softprog_interrupt_reentry),gr3
        bra __break_return_as_kernel_prologue

        # step through an external interrupt from kernel mode
        .globl __break_step_kernel_external_interrupt
__break_step_kernel_external_interrupt:
        # deal with virtual interrupt disablement
        beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled

        sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3
        setlo %lo(__entry_kernel_external_interrupt_reentry),gr3

__break_return_as_kernel_prologue:
        LEDS 0x203f,gr2

        movgs gr3,bpcsr

        # do the bit we had to skip
#ifdef CONFIG_MMU
        movsg ear0,gr2 /* EAR0 can get clobbered by gdb-stub (ICI/ICEI) */
        movgs gr2,scr2
#endif

        or.p sp,gr0,gr2 /* set up the stack pointer */
        subi sp,#REG__END,sp
        sti.p gr2,@(sp,#REG_SP)

        setlos #REG__STATUS_STEP,gr2
        sti gr2,@(sp,#REG__STATUS) /* record single step status */

        # cancel single-stepping mode
        movsg dcr,gr2
        sethi.p %hi(~DCR_SE),gr3
        setlo %lo(~DCR_SE),gr3
        and gr2,gr3,gr2
        movgs gr2,dcr

        LEDS 0x207f,gr2

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        lddi.p @(gr31,#REG_GR(2)),gr2
        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

        # we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled
        # need to really disable interrupts, set flag, fix up and return
__break_step_kernel_external_interrupt_virtually_disabled:
        movsg psr,gr2
        andi gr2,#~PSR_PIL,gr2
        ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */
        movgs gr2,psr

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */

        # exceptions must've been enabled and we must've been in supervisor mode
        setlos BPSR_BET|BPSR_BS,gr3
        movgs gr3,bpsr

        # return to where the interrupt happened
        movsg pcsr,gr2
        movgs gr2,bpcsr

        lddi.p @(gr31,#REG_GR(2)),gr2

        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

        # we stepped through into the virtual interrupt reenablement trap
        #
        # we also want to single step anyway, but after fixing up so that we get an event on the
        # instruction after the broken-into exception returns
        .globl __break_step_kernel_external_interrupt_virtual_reenable
__break_step_kernel_external_interrupt_virtual_reenable:
        movsg psr,gr2
        andi gr2,#~PSR_PIL,gr2
        movgs gr2,psr

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        subicc gr0,#1,gr0,icc2 /* clear Z, set C */

        # save the adjusted ICC2
        movsg ccr,gr3
        sti gr3,@(gr31,#REG_CCR)

        # exceptions must've been enabled and we must've been in supervisor mode
        setlos BPSR_BET|BPSR_BS,gr3
        movgs gr3,bpsr

        # return to where the trap happened
        movsg pcsr,gr2
        movgs gr2,bpcsr

        # and then process the single step
        bra __break_continue

        # step through an internal exception from uspace mode
        .globl __break_step_uspace_softprog_interrupt
__break_step_uspace_softprog_interrupt:
        sethi.p %hi(__entry_uspace_softprog_interrupt_reentry),gr3
        setlo %lo(__entry_uspace_softprog_interrupt_reentry),gr3
        bra __break_return_as_uspace_prologue

        # step through an external interrupt from uspace mode
        .globl __break_step_uspace_external_interrupt
__break_step_uspace_external_interrupt:
        sethi.p %hi(__entry_uspace_external_interrupt_reentry),gr3
        setlo %lo(__entry_uspace_external_interrupt_reentry),gr3

__break_return_as_uspace_prologue:
        LEDS 0x20ff,gr2

        movgs gr3,bpcsr

        # do the bit we had to skip
        sethi.p %hi(__kernel_frame0_ptr),gr28
        setlo %lo(__kernel_frame0_ptr),gr28
        ldi.p @(gr28,#0),gr28

        setlos #REG__STATUS_STEP,gr2
        sti gr2,@(gr28,#REG__STATUS) /* record single step status */

        # cancel single-stepping mode
        movsg dcr,gr2
        sethi.p %hi(~DCR_SE),gr3
        setlo %lo(~DCR_SE),gr3
        and gr2,gr3,gr2
        movgs gr2,dcr

        LEDS 0x20fe,gr2

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        lddi.p @(gr31,#REG_GR(2)),gr2
        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

#ifdef CONFIG_MMU
        # step through an ITLB-miss handler from user mode
        .globl __break_user_insn_tlb_miss
__break_user_insn_tlb_miss:
        # we'll want to try the trap stub again
        sethi.p %hi(__trap_user_insn_tlb_miss),gr2
        setlo %lo(__trap_user_insn_tlb_miss),gr2
        movgs gr2,bpcsr

__break_tlb_miss_common:
        LEDS 0x2101,gr2

        # cancel single-stepping mode
        movsg dcr,gr2
        sethi.p %hi(~DCR_SE),gr3
        setlo %lo(~DCR_SE),gr3
        and gr2,gr3,gr2
        movgs gr2,dcr

        # we'll swap the real return address for one with a BREAK insn so that we can re-enable
        # single stepping on return
        movsg pcsr,gr2
        sethi.p %hi(__break_tlb_miss_real_return_info),gr3
        setlo %lo(__break_tlb_miss_real_return_info),gr3
        sti gr2,@(gr3,#0)

        sethi.p %hi(__break_tlb_miss_return_break),gr2
        setlo %lo(__break_tlb_miss_return_break),gr2
        movgs gr2,pcsr

        # we also have to fudge PSR because the return BREAK is in kernel space and we want
        # to get a BREAK fault not an access violation should the return be to userspace
        movsg psr,gr2
        sti.p gr2,@(gr3,#4)
        ori gr2,#PSR_PS,gr2
        movgs gr2,psr
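        # (the TLB-miss handler will now "return" to __break_tlb_miss_return_break,
        #  whose BREAK insn drops us straight back into __entry_break; the
        #  __break_return_singlestep_tlbmiss path then restores the real PCSR and
        #  PSR saved above in __break_tlb_miss_real_return_info)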

        LEDS 0x2102,gr2

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        lddi @(gr31,#REG_GR(2)),gr2
        movsg scr3,gr31
        movgs gr0,brr
        rett #1

        # step through a DTLB-miss handler from user mode
        .globl __break_user_data_tlb_miss
__break_user_data_tlb_miss:
        # we'll want to try the trap stub again
        sethi.p %hi(__trap_user_data_tlb_miss),gr2
        setlo %lo(__trap_user_data_tlb_miss),gr2
        movgs gr2,bpcsr
        bra __break_tlb_miss_common

        # step through an ITLB-miss handler from kernel mode
        .globl __break_kernel_insn_tlb_miss
__break_kernel_insn_tlb_miss:
        # we'll want to try the trap stub again
        sethi.p %hi(__trap_kernel_insn_tlb_miss),gr2
        setlo %lo(__trap_kernel_insn_tlb_miss),gr2
        movgs gr2,bpcsr
        bra __break_tlb_miss_common

        # step through a DTLB-miss handler from kernel mode
        .globl __break_kernel_data_tlb_miss
__break_kernel_data_tlb_miss:
        # we'll want to try the trap stub again
        sethi.p %hi(__trap_kernel_data_tlb_miss),gr2
        setlo %lo(__trap_kernel_data_tlb_miss),gr2
        movgs gr2,bpcsr
        bra __break_tlb_miss_common
#endif

###############################################################################
#
# handle debug events originating with userspace
#
###############################################################################
__break_maybe_userspace:
        LEDS 0x3003,gr2

        setlos #BPSR_BS,gr2
        andcc gr3,gr2,gr0,icc0
        bne icc0,#0,__break_continue /* skip if PSR.S was 1 */

        movsg brr,gr2
        andicc gr2,#BRR_ST|BRR_SB,gr0,icc0
        beq icc0,#0,__break_continue /* jump if not BREAK or single-step */

        LEDS 0x3007,gr2

        # do the first part of the exception prologue here
        sethi.p %hi(__kernel_frame0_ptr),gr28
        setlo %lo(__kernel_frame0_ptr),gr28
        ldi @(gr28,#0),gr28
        andi gr28,#~7,gr28

        # set up the kernel stack pointer
        sti sp ,@(gr28,#REG_SP)
        ori gr28,0,sp
        sti gr0 ,@(gr28,#REG_GR(28))

        stdi gr20,@(gr28,#REG_GR(20))
        stdi gr22,@(gr28,#REG_GR(22))

        movsg tbr,gr20
        movsg bpcsr,gr21
        movsg psr,gr22

        # determine the exception type and cancel single-stepping mode
        or gr0,gr0,gr23

        movsg dcr,gr2
        sethi.p %hi(DCR_SE),gr3
        setlo %lo(DCR_SE),gr3
        andcc gr2,gr3,gr0,icc0
        beq icc0,#0,__break_no_user_sstep /* must have been a BREAK insn */

        not gr3,gr3
        and gr2,gr3,gr2
        movgs gr2,dcr
        ori gr23,#REG__STATUS_STEP,gr23

__break_no_user_sstep:
        LEDS 0x300f,gr2

        movsg brr,gr2
        andi gr2,#BRR_ST|BRR_SB,gr2
        slli gr2,#1,gr2
        or gr23,gr2,gr23
        sti.p gr23,@(gr28,#REG__STATUS) /* record single step status */

        # adjust the value acquired from TBR - this indicates the exception
        setlos #~TBR_TT,gr2
        and.p gr20,gr2,gr20
        setlos #TBR_TT_BREAK,gr2
        or.p gr20,gr2,gr20

        # fudge PSR.PS and BPSR.BS to return to kernel mode through the trap
        # table as trap 126
        andi gr22,#~PSR_PS,gr22 /* PSR.PS should be 0 */
        movgs gr22,psr

        setlos #BPSR_BS,gr2 /* BPSR.BS should be 1 and BPSR.BET 0 */
        movgs gr2,bpsr

        # return through remainder of the exception prologue
        # - need to load gr23 with return handler address
        sethi.p %hi(__entry_return_from_user_exception),gr23
        setlo %lo(__entry_return_from_user_exception),gr23
        sethi.p %hi(__entry_common),gr3
        setlo %lo(__entry_common),gr3
        movgs gr3,bpcsr

        LEDS 0x301f,gr2

        ldi @(gr31,#REG_CCR),gr3
        movgs gr3,ccr
        lddi.p @(gr31,#REG_GR(2)),gr2
        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

###############################################################################
#
# resume normal debug-mode entry
#
###############################################################################
__break_continue:
        LEDS 0x4003,gr2

        # set up the kernel stack pointer
        sti sp,@(gr31,#REG_SP)

        sethi.p %hi(__break_stack_tos),sp
        setlo %lo(__break_stack_tos),sp

        # finish building the exception frame
        stdi gr4 ,@(gr31,#REG_GR(4))
        stdi gr6 ,@(gr31,#REG_GR(6))
        stdi gr8 ,@(gr31,#REG_GR(8))
        stdi gr10,@(gr31,#REG_GR(10))
        stdi gr12,@(gr31,#REG_GR(12))
        stdi gr14,@(gr31,#REG_GR(14))
        stdi gr16,@(gr31,#REG_GR(16))
        stdi gr18,@(gr31,#REG_GR(18))
        stdi gr20,@(gr31,#REG_GR(20))
        stdi gr22,@(gr31,#REG_GR(22))
        stdi gr24,@(gr31,#REG_GR(24))
        stdi gr26,@(gr31,#REG_GR(26))
        sti gr0 ,@(gr31,#REG_GR(28)) /* NULL frame pointer */
        sti gr29,@(gr31,#REG_GR(29))
        sti gr30,@(gr31,#REG_GR(30))
        sti gr8 ,@(gr31,#REG_ORIG_GR8)

#ifdef CONFIG_MMU
        movsg scr3,gr19
        sti gr19,@(gr31,#REG_GR(31))
#endif

        movsg bpsr ,gr19
        movsg tbr ,gr20
        movsg bpcsr,gr21
        movsg psr ,gr22
        movsg isr ,gr23
        movsg cccr ,gr25
        movsg lr ,gr26
        movsg lcr ,gr27

        andi.p gr22,#~(PSR_S|PSR_ET),gr5 /* rebuild PSR */
        andi gr19,#PSR_ET,gr4
        or.p gr4,gr5,gr5
        srli gr19,#10,gr4
        andi gr4,#PSR_S,gr4
        or.p gr4,gr5,gr5

        setlos #-1,gr6
        sti gr20,@(gr31,#REG_TBR)
        sti gr21,@(gr31,#REG_PC)
        sti gr5 ,@(gr31,#REG_PSR)
        sti gr23,@(gr31,#REG_ISR)
        sti gr25,@(gr31,#REG_CCCR)
        stdi gr26,@(gr31,#REG_LR)
        sti gr6 ,@(gr31,#REG_SYSCALLNO)

        # store CPU-specific regs
        movsg iacc0h,gr4
        movsg iacc0l,gr5
        stdi gr4,@(gr31,#REG_IACC0)

        movsg gner0,gr4
        movsg gner1,gr5
        stdi gr4,@(gr31,#REG_GNER0)

        # build the debug register frame
        movsg brr,gr4
        movgs gr0,brr
        movsg nmar,gr5
        movsg dcr,gr6

        stdi gr4 ,@(gr31,#REG_BRR)
        sti gr19,@(gr31,#REG_BPSR)
        sti.p gr6 ,@(gr31,#REG_DCR)

        # trap exceptions during break handling and disable h/w breakpoints/watchpoints
        sethi %hi(DCR_EBE),gr5
        setlo.p %lo(DCR_EBE),gr5
        sethi %hi(__entry_breaktrap_table),gr4
        setlo %lo(__entry_breaktrap_table),gr4
        movgs gr5,dcr
        movgs gr4,tbr

        # set up kernel global registers
        sethi.p %hi(__kernel_current_task),gr5
        setlo %lo(__kernel_current_task),gr5
        ld @(gr5,gr0),gr29
        ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info

        sethi %hi(_gp),gr16
        setlo.p %lo(_gp),gr16

        # make sure we (the kernel) get div-zero and misalignment exceptions
        setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
        movgs gr5,isr

        # enter the GDB stub
        LEDS 0x4007,gr2

        or.p gr0,gr0,fp
        call debug_stub

        LEDS 0x403f,gr2

        # return from break
        lddi @(gr31,#REG_IACC0),gr4
        movgs gr4,iacc0h
        movgs gr5,iacc0l

        lddi @(gr31,#REG_GNER0),gr4
        movgs gr4,gner0
        movgs gr5,gner1

        lddi @(gr31,#REG_LR) ,gr26
        lddi @(gr31,#REG_CCR) ,gr24
        lddi @(gr31,#REG_PSR) ,gr22
        ldi @(gr31,#REG_PC) ,gr21
        ldi @(gr31,#REG_TBR) ,gr20
        ldi.p @(gr31,#REG_DCR) ,gr6

        andi gr22,#PSR_S,gr19 /* rebuild BPSR */
        andi.p gr22,#PSR_ET,gr5
        slli gr19,#10,gr19
        or gr5,gr19,gr19

        movgs gr6 ,dcr
        movgs gr19,bpsr
        movgs gr20,tbr
        movgs gr21,bpcsr
        movgs gr23,isr
        movgs gr24,ccr
        movgs gr25,cccr
        movgs gr26,lr
        movgs gr27,lcr

        LEDS 0x407f,gr2

#ifdef CONFIG_MMU
        ldi @(gr31,#REG_GR(31)),gr2
        movgs gr2,scr3
#endif

        ldi @(gr31,#REG_GR(30)),gr30
        ldi @(gr31,#REG_GR(29)),gr29
        lddi @(gr31,#REG_GR(26)),gr26
        lddi @(gr31,#REG_GR(24)),gr24
        lddi @(gr31,#REG_GR(22)),gr22
        lddi @(gr31,#REG_GR(20)),gr20
        lddi @(gr31,#REG_GR(18)),gr18
        lddi @(gr31,#REG_GR(16)),gr16
        lddi @(gr31,#REG_GR(14)),gr14
        lddi @(gr31,#REG_GR(12)),gr12
        lddi @(gr31,#REG_GR(10)),gr10
        lddi @(gr31,#REG_GR(8)) ,gr8
        lddi @(gr31,#REG_GR(6)) ,gr6
        lddi @(gr31,#REG_GR(4)) ,gr4
        lddi @(gr31,#REG_GR(2)) ,gr2
        ldi.p @(gr31,#REG_SP) ,sp

        xor gr31,gr31,gr31
        movgs gr0,brr
#ifdef CONFIG_MMU
        movsg scr3,gr31
#endif
        rett #1

###################################################################################################
#
# GDB stub "system calls"
#
###################################################################################################

#ifdef CONFIG_GDBSTUB
        # void gdbstub_console_write(struct console *con, const char *p, unsigned n)
        .globl gdbstub_console_write
gdbstub_console_write:
        break
        bralr
#endif

        # GDB stub BUG() trap
        # GR8 is the proposed signal number
        .globl __debug_bug_trap
__debug_bug_trap:
        break
        bralr

        # transfer a kernel exception to GDB for handling
        .globl __break_hijack_kernel_event
__break_hijack_kernel_event:
        break
        .globl __break_hijack_kernel_event_breaks_here
__break_hijack_kernel_event_breaks_here:
        nop

#ifdef CONFIG_MMU
        # handle a return from TLB-miss that requires single-step reactivation
        .globl __break_tlb_miss_return_break
__break_tlb_miss_return_break:
        break
__break_tlb_miss_return_breaks_here:
        nop
#endif

        # guard the first .text label in the next file from confusion
        nop