]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - arch/avr32/kernel/entry-avr32b.S
[AVR32] Clean up OCD register usage
[mirror_ubuntu-jammy-kernel.git] / arch / avr32 / kernel / entry-avr32b.S
1 /*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 /*
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
13 */
14 #include <linux/errno.h>
15
16 #include <asm/asm.h>
17 #include <asm/hardirq.h>
18 #include <asm/irq.h>
19 #include <asm/ocd.h>
20 #include <asm/page.h>
21 #include <asm/pgtable.h>
22 #include <asm/ptrace.h>
23 #include <asm/sysreg.h>
24 #include <asm/thread_info.h>
25 #include <asm/unistd.h>
26
/*
 * Without CONFIG_PREEMPT the kernel-mode fault return path needs no
 * preemption check.
 * NOTE(review): the !PREEMPT alias maps fault_resume_kernel to
 * fault_restore_all, which is not defined in this file — presumably
 * provided elsewhere; confirm before relying on the !PREEMPT build.
 */
27 #ifdef CONFIG_PREEMPT
28 # define preempt_stop mask_interrupts
29 #else
30 # define preempt_stop
31 # define fault_resume_kernel fault_restore_all
32 #endif
33
/* Mask covering the softirq and hardirq count fields of preempt_count */
34 #define __MASK(x) ((1 << (x)) - 1)
35 #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
37
/*
 * Exception vector table, placed in its own section so the linker can
 * locate it at the base address programmed into EVBA.  The hardware
 * enters at a fixed 4-byte offset per exception cause (.align 2), so
 * each slot holds a single branch to the real handler.
 */
38 .section .ex.text,"ax",@progbits
39 .align 2
40 exception_vectors:
41 bral handle_critical
42 .align 2
43 bral handle_critical
44 .align 2
45 bral do_bus_error_write
46 .align 2
47 bral do_bus_error_read
48 .align 2
49 bral do_nmi_ll
50 .align 2
51 bral handle_address_fault
52 .align 2
53 bral handle_protection_fault
54 .align 2
55 bral handle_debug
56 .align 2
57 bral do_illegal_opcode_ll
58 .align 2
59 bral do_illegal_opcode_ll
60 .align 2
61 bral do_illegal_opcode_ll
62 .align 2
63 bral do_fpe_ll
64 .align 2
65 bral do_illegal_opcode_ll
66 .align 2
67 bral handle_address_fault
68 .align 2
69 bral handle_address_fault
70 .align 2
71 bral handle_protection_fault
72 .align 2
73 bral handle_protection_fault
74 .align 2
75 bral do_dtlb_modified
76
77 /*
78 * r0 : PGD/PT/PTE
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
82 */
/* Save/restore the four scratch registers used by the TLB-miss fast path */
83 #define tlbmiss_save pushm r0-r3
84 #define tlbmiss_restore popm r0-r3
85
/*
 * The three TLB-miss entry points live in separate sections so that
 * each can be placed at the offset the hardware expects.  ITLB and
 * DTLB-read misses branch to the shared page-table walker below.
 */
86 .section .tlbx.ex.text,"ax",@progbits
87 .global itlb_miss
88 itlb_miss:
89 tlbmiss_save
90 rjmp tlb_miss_common
91
92 .section .tlbr.ex.text,"ax",@progbits
93 dtlb_miss_read:
94 tlbmiss_save
95 rjmp tlb_miss_common
96
97 .section .tlbw.ex.text,"ax",@progbits
98 dtlb_miss_write:
99 tlbmiss_save
/* Note: dtlb_miss_write falls straight through into tlb_miss_common */
100
101 .global tlb_miss_common
102 tlb_miss_common:
/* r0 = faulting virtual address (TLBEAR), r1 = page table base (PTBR) */
103 mfsr r0, SYSREG_TLBEAR
104 mfsr r1, SYSREG_PTBR
105
106 /* Is it the vmalloc space? */
/* Addresses with bit 31 set are resolved through the kernel page table */
107 bld r0, 31
108 brcs handle_vmalloc_miss
109
110 /* First level lookup */
111 pgtbl_lookup:
/* r2 = PGD index, r3 = PGD entry; r1 becomes the second-level (PTE) index */
112 lsr r2, r0, PGDIR_SHIFT
113 ld.w r3, r1[r2 << 2]
114 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
115 bld r3, _PAGE_BIT_PRESENT
116 brcc page_table_not_present
117
118 /* Translate to virtual address in P1. */
119 andl r3, 0xf000
120 sbr r3, 31
121
122 /* Second level lookup */
123 ld.w r2, r3[r1 << 2]
124 mfsr r0, SYSREG_TLBARLO
125 bld r2, _PAGE_BIT_PRESENT
126 brcc page_not_present
127
128 /* Mark the page as accessed */
129 sbr r2, _PAGE_BIT_ACCESSED
130 st.w r3[r1 << 2], r2
131
132 /* Drop software flags */
133 andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
134 mtsr SYSREG_TLBELO, r2
135
136 /* Figure out which entry we want to replace */
/*
 * TLBARLO holds one "accessed" flag per TLB entry; clz picks the first
 * entry not accessed since TLBAR was last reset, and writes the victim
 * index into MMUCR[DRP].
 */
137 mfsr r1, SYSREG_MMUCR
138 clz r2, r0
139 brcc 1f
140 mov r3, -1 /* All entries have been accessed, */
141 mov r2, 0 /* so start at 0 */
142 mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
143
144 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
145 mtsr SYSREG_MMUCR, r1
146 tlbw
147
148 tlbmiss_restore
149 rete
150
151 handle_vmalloc_miss:
152 /* Simply do the lookup in init's page table */
153 mov r1, lo(swapper_pg_dir)
154 orh r1, hi(swapper_pg_dir)
155 rjmp pgtbl_lookup
156
157
158 /* --- System Call --- */
159
/*
 * System call entry, reached via the 'scall' instruction.  The return
 * address and status live in RAR_SUP/RSR_SUP; r8 holds the syscall
 * number (checked against NR_syscalls below).
 */
160 .section .scall.text,"ax",@progbits
161 system_call:
162 #ifdef CONFIG_PREEMPT
163 mask_interrupts
164 #endif
165 pushm r12 /* r12_orig */
166 stmts --sp, r0-lr
167
/* Save the return PC and status register into the pt_regs frame */
168 mfsr r0, SYSREG_RAR_SUP
169 mfsr r1, SYSREG_RSR_SUP
170 #ifdef CONFIG_PREEMPT
171 unmask_interrupts
172 #endif
173 zero_fp
174 stm --sp, r0-r1
175
176 /* check for syscall tracing */
177 get_thread_info r0
178 ld.w r1, r0[TI_flags]
179 bld r1, TIF_SYSCALL_TRACE
180 brcs syscall_trace_enter
181
182 syscall_trace_cont:
183 cp.w r8, NR_syscalls
184 brhs syscall_badsys
185
/* Indirect call through sys_call_table, indexed by syscall number r8 */
186 lddpc lr, syscall_table_addr
187 ld.w lr, lr[r8 << 2]
188 mov r8, r5 /* 5th argument (6th is pushed by stub) */
189 icall lr
190
191 .global syscall_return
192 syscall_return:
193 get_thread_info r0
194 mask_interrupts /* make sure we don't miss an interrupt
195 setting need_resched or sigpending
196 between sampling and the rets */
197
198 /* Store the return value so that the correct value is loaded below */
199 stdsp sp[REG_R12], r12
200
201 ld.w r1, r0[TI_flags]
202 andl r1, _TIF_ALLWORK_MASK, COH
203 brne syscall_exit_work
204
205 syscall_exit_cont:
/* Restore return PC/SR and the full register set, return with rets */
206 popm r8-r9
207 mtsr SYSREG_RAR_SUP, r8
208 mtsr SYSREG_RSR_SUP, r9
209 ldmts sp++, r0-lr
210 sub sp, -4 /* r12_orig */
211 rets
212
213 .align 2
214 syscall_table_addr:
215 .long sys_call_table
216
217 syscall_badsys:
218 mov r12, -ENOSYS
219 rjmp syscall_return
220
221 .global ret_from_fork
222 ret_from_fork:
223 rcall schedule_tail
224
225 /* check for syscall tracing */
226 get_thread_info r0
227 ld.w r1, r0[TI_flags]
228 andl r1, _TIF_ALLWORK_MASK, COH
229 brne syscall_exit_work
230 rjmp syscall_exit_cont
231
232 syscall_trace_enter:
233 pushm r8-r12
234 rcall syscall_trace
235 popm r8-r12
236 rjmp syscall_trace_cont
237
/*
 * Slow exit path: handle tracing, rescheduling, signals and breakpoint
 * re-arming, reloading TI_flags after each action until nothing is left.
 */
238 syscall_exit_work:
239 bld r1, TIF_SYSCALL_TRACE
240 brcc 1f
241 unmask_interrupts
242 rcall syscall_trace
243 mask_interrupts
244 ld.w r1, r0[TI_flags]
245
246 1: bld r1, TIF_NEED_RESCHED
247 brcc 2f
248 unmask_interrupts
249 rcall schedule
250 mask_interrupts
251 ld.w r1, r0[TI_flags]
252 rjmp 1b
253
254 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
255 tst r1, r2
256 breq 3f
257 unmask_interrupts
258 mov r12, sp
259 mov r11, r0
260 rcall do_notify_resume
261 mask_interrupts
262 ld.w r1, r0[TI_flags]
263 rjmp 1b
264
/*
 * TIF_BREAKPOINT: re-arm the hardware breakpoint through the OCD unit
 * before returning to user space — BWA2A gets the user PC, BWC2A a
 * control word derived from the low TLBEHI bits (presumably the ASID;
 * confirm against the OCD documentation).
 */
265 3: bld r1, TIF_BREAKPOINT
266 brcc syscall_exit_cont
267 mfsr r3, SYSREG_TLBEHI
268 lddsp r2, sp[REG_PC]
269 andl r3, 0xff, COH
270 lsl r3, 1
271 sbr r3, 30
272 sbr r3, 0
273 mtdr OCD_BWA2A, r2
274 mtdr OCD_BWC2A, r3
275 rjmp syscall_exit_cont
276
277
278 /* The slow path of the TLB miss handler */
279 page_table_not_present:
280 page_not_present:
/* Undo the fast-path saves and build a full pt_regs frame instead */
281 tlbmiss_restore
282 sub sp, 4
283 stmts --sp, r0-lr
284 rcall save_full_context_ex
285 mfsr r12, SYSREG_ECR
286 mov r11, sp
287 rcall do_page_fault
288 rjmp ret_from_exception
289
290 /* This function expects to find offending PC in SYSREG_RAR_EX */
/*
 * Completes the pt_regs frame: pushes PC and SR and re-enables
 * exceptions.  If the exception came from a non-user mode, the saved
 * SP slot is first fixed up to point past this exception frame (2:).
 * Returns with the original RSR_EX value in r12.
 */
291 save_full_context_ex:
292 mfsr r8, SYSREG_RSR_EX
293 mov r12, r8
294 andh r8, (MODE_MASK >> 16), COH
295 mfsr r11, SYSREG_RAR_EX
296 brne 2f
297
298 1: pushm r11, r12 /* PC and SR */
299 unmask_exceptions
300 ret r12
301
302 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
303 stdsp sp[4], r10 /* replace saved SP */
304 rjmp 1b
305
306 /* Low-level exception handlers */
/* Unrecoverable exception: save everything, report, and never return */
307 handle_critical:
308 pushm r12
309 pushm r0-r12
310 rcall save_full_context_ex
311 mfsr r12, SYSREG_ECR
312 mov r11, sp
313 rcall do_critical_exception
314
315 /* We should never get here... */
316 bad_return:
/* r12 = PC-relative address of the message below, then panic() */
317 sub r12, pc, (. - 1f)
318 bral panic
319 .align 2
320 1: .asciz "Return from critical exception!"
321
322 .align 1
/* Bus error handlers share a tail: r11 = 1 (write) / 0 (read), r12 = BEAR */
323 do_bus_error_write:
324 sub sp, 4
325 stmts --sp, r0-lr
326 rcall save_full_context_ex
327 mov r11, 1
328 rjmp 1f
329
330 do_bus_error_read:
331 sub sp, 4
332 stmts --sp, r0-lr
333 rcall save_full_context_ex
334 mov r11, 0
335 1: mfsr r12, SYSREG_BEAR
336 mov r10, sp
337 rcall do_bus_error
338 rjmp ret_from_exception
339
340 .align 1
/*
 * NMI handler.  r0 = mode bits of the interrupted context (0 = user
 * mode).  A non-user entry fixes up the saved SP (2:) and returns via
 * the partial-restore path (3:) instead of popping the full user set.
 */
341 do_nmi_ll:
342 sub sp, 4
343 stmts --sp, r0-lr
344 mfsr r9, SYSREG_RSR_NMI
345 mfsr r8, SYSREG_RAR_NMI
346 bfextu r0, r9, MODE_SHIFT, 3
347 brne 2f
348
349 1: pushm r8, r9 /* PC and SR */
350 mfsr r12, SYSREG_ECR
351 mov r11, sp
352 rcall do_nmi
353 popm r8-r9
354 mtsr SYSREG_RAR_NMI, r8
355 tst r0, r0
356 mtsr SYSREG_RSR_NMI, r9
357 brne 3f
358
359 ldmts sp++, r0-lr
360 sub sp, -4 /* skip r12_orig */
361 rete
362
363 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
364 stdsp sp[4], r10 /* replace saved SP */
365 rjmp 1b
366
367 3: popm lr
368 sub sp, -4 /* skip sp */
369 popm r0-r12
370 sub sp, -4 /* skip r12_orig */
371 rete
372
/* Generic exception wrappers: build pt_regs, then call the C handler
 * with r12 = exception cause (ECR) and r11 = pt_regs pointer. */
373 handle_address_fault:
374 sub sp, 4
375 stmts --sp, r0-lr
376 rcall save_full_context_ex
377 mfsr r12, SYSREG_ECR
378 mov r11, sp
379 rcall do_address_exception
380 rjmp ret_from_exception
381
382 handle_protection_fault:
383 sub sp, 4
384 stmts --sp, r0-lr
385 rcall save_full_context_ex
386 mfsr r12, SYSREG_ECR
387 mov r11, sp
388 rcall do_page_fault
389 rjmp ret_from_exception
390
391 .align 1
392 do_illegal_opcode_ll:
393 sub sp, 4
394 stmts --sp, r0-lr
395 rcall save_full_context_ex
396 mfsr r12, SYSREG_ECR
397 mov r11, sp
398 rcall do_illegal_opcode
399 rjmp ret_from_exception
400
/*
 * DTLB modified (write to a clean page): walk the page table, set the
 * dirty bit in the PTE and rewrite the TLB entry.  Pure fast path —
 * no pt_regs frame, no C call.
 */
401 do_dtlb_modified:
402 pushm r0-r3
403 mfsr r1, SYSREG_TLBEAR
404 mfsr r0, SYSREG_PTBR
405 lsr r2, r1, PGDIR_SHIFT
406 ld.w r0, r0[r2 << 2]
407 lsl r1, (32 - PGDIR_SHIFT)
408 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
409
410 /* Translate to virtual address in P1 */
411 andl r0, 0xf000
412 sbr r0, 31
413 add r2, r0, r1 << 2
414 ld.w r3, r2[0]
415 sbr r3, _PAGE_BIT_DIRTY
416 mov r0, r3
417 st.w r2[0], r3
418
419 /* The page table is up-to-date. Update the TLB entry as well */
420 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
421 mtsr SYSREG_TLBELO, r0
422
423 /* MMUCR[DRP] is updated automatically, so let's go... */
424 tlbw
425
426 popm r0-r3
427 rete
428
/* FP exception: r12 = 26 (presumably the ECR value for FPE — confirm) */
429 do_fpe_ll:
430 sub sp, 4
431 stmts --sp, r0-lr
432 rcall save_full_context_ex
433 unmask_interrupts
434 mov r12, 26
435 mov r11, sp
436 rcall do_fpe
437 rjmp ret_from_exception
438
/* Common exception return: pick the kernel- or user-mode resume path
 * based on the mode bits in the saved SR. */
439 ret_from_exception:
440 mask_interrupts
441 lddsp r4, sp[REG_SR]
442 andh r4, (MODE_MASK >> 16), COH
443 brne fault_resume_kernel
444
445 get_thread_info r0
446 ld.w r1, r0[TI_flags]
447 andl r1, _TIF_WORK_MASK, COH
448 brne fault_exit_work
449
450 fault_resume_user:
451 popm r8-r9
452 mask_exceptions
453 mtsr SYSREG_RAR_EX, r8
454 mtsr SYSREG_RSR_EX, r9
455 ldmts sp++, r0-lr
456 sub sp, -4
457 rete
458
/* Return to kernel mode, possibly preempting first (CONFIG_PREEMPT):
 * only when preempt_count == 0, TIF_NEED_RESCHED is set and interrupts
 * were not globally masked (GM clear) in the interrupted context. */
459 fault_resume_kernel:
460 #ifdef CONFIG_PREEMPT
461 get_thread_info r0
462 ld.w r2, r0[TI_preempt_count]
463 cp.w r2, 0
464 brne 1f
465 ld.w r1, r0[TI_flags]
466 bld r1, TIF_NEED_RESCHED
467 brcc 1f
468 lddsp r4, sp[REG_SR]
469 bld r4, SYSREG_GM_OFFSET
470 brcs 1f
471 rcall preempt_schedule_irq
472 1:
473 #endif
474
475 popm r8-r9
476 mask_exceptions
477 mfsr r1, SYSREG_SR
478 mtsr SYSREG_RAR_EX, r8
479 mtsr SYSREG_RSR_EX, r9
480 popm lr
481 sub sp, -4 /* ignore SP */
482 popm r0-r12
483 sub sp, -4 /* ignore r12_orig */
484 rete
485
486 irq_exit_work:
487 /* Switch to exception mode so that we can share the same code. */
488 mfsr r8, SYSREG_SR
489 cbr r8, SYSREG_M0_OFFSET
490 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
491 mtsr SYSREG_SR, r8
492 sub pc, -2 /* flush the pipeline after the SR write */
493 get_thread_info r0
494 ld.w r1, r0[TI_flags]
495
/* Loop handling reschedule and signals until TI_flags is quiet */
496 fault_exit_work:
497 bld r1, TIF_NEED_RESCHED
498 brcc 1f
499 unmask_interrupts
500 rcall schedule
501 mask_interrupts
502 ld.w r1, r0[TI_flags]
503 rjmp fault_exit_work
504
505 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
506 tst r1, r2
507 breq 2f
508 unmask_interrupts
509 mov r12, sp
510 mov r11, r0
511 rcall do_notify_resume
512 mask_interrupts
513 ld.w r1, r0[TI_flags]
514 rjmp fault_exit_work
515
/* TIF_BREAKPOINT: re-arm the user hardware breakpoint via the OCD
 * registers, same sequence as the syscall exit path. */
516 2: bld r1, TIF_BREAKPOINT
517 brcc fault_resume_user
518 mfsr r3, SYSREG_TLBEHI
519 lddsp r2, sp[REG_PC]
520 andl r3, 0xff, COH
521 lsl r3, 1
522 sbr r3, 30
523 sbr r3, 0
524 mtdr OCD_BWA2A, r2
525 mtdr OCD_BWC2A, r3
526 rjmp fault_resume_user
527
528 /* If we get a debug trap from privileged context we end up here */
529 handle_debug_priv:
530 /* Fix up LR and SP in regs. r1 contains the mode we came from */
/*
 * LR is banked per mode, so temporarily switch SR to the interrupted
 * context's mode to read its LR, then switch back.  Each SR write is
 * followed by 'sub pc, -2' to flush the pipeline.
 */
531 mfsr r2, SYSREG_SR
532 mov r3, r2
533 bfins r2, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
534 mtsr SYSREG_SR, r2
535 sub pc, -2
536 stdsp sp[REG_LR], lr
537 mtsr SYSREG_SR, r3
538 sub pc, -2
539 sub r10, sp, -FRAME_SIZE_FULL
540 stdsp sp[REG_SP], r10
541 mov r12, sp
542 rcall do_debug_priv
543
544 /* Now, put everything back */
545 ssrf SR_EM_BIT
546 popm r10, r11
547 mtsr SYSREG_RAR_DBG, r10
548 mtsr SYSREG_RSR_DBG, r11
549 #ifdef CONFIG_TRACE_IRQFLAGS
550 bld r11, SYSREG_GM_OFFSET
551 brcc 1f
552 rcall trace_hardirqs_on
553 1:
554 #endif
/* Switch modes again to restore the banked LR, then return with retd */
555 mfsr r2, SYSREG_SR
556 mov r3, r2
557 bfins r2, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
558 mtsr SYSREG_SR, r2
559 sub pc, -2
560 popm lr
561 mtsr SYSREG_SR, r3
562 sub pc, -2
563 sub sp, -4 /* skip SP */
564 popm r0-r12
565 sub sp, -4
566 retd
567
568 /*
569 * At this point, everything is masked, that is, interrupts,
570 * exceptions and debugging traps. We might get called from
571 * interrupt or exception context in some rare cases, but this
572 * will be taken care of by do_debug(), so we're not going to
573 * do a 100% correct context save here.
574 */
575 handle_debug:
576 sub sp, 4 /* r12_orig */
577 stmts --sp, r0-lr
/* Saved PC/SR come from the debug return registers RAR_DBG/RSR_DBG */
578 mfsr r0, SYSREG_RAR_DBG
579 mfsr r1, SYSREG_RSR_DBG
580 #ifdef CONFIG_TRACE_IRQFLAGS
581 rcall trace_hardirqs_off
582 #endif
583 unmask_exceptions
584 stm --sp, r0, r1
/* Non-zero mode bits mean the trap came from privileged context */
585 bfextu r1, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
586 brne handle_debug_priv
587
588 mov r12, sp
589 rcall do_debug
590
591 lddsp r10, sp[REG_SR]
592 andh r10, (MODE_MASK >> 16), COH
593 breq debug_resume_user
594
595 debug_restore_all:
596 popm r10,r11
597 mask_exceptions
598 mtsr SYSREG_RSR_DBG, r11
599 mtsr SYSREG_RAR_DBG, r10
600 #ifdef CONFIG_TRACE_IRQFLAGS
601 bld r11, SYSREG_GM_OFFSET
602 brcc 1f
603 rcall trace_hardirqs_on
604 1:
605 #endif
606 ldmts sp++, r0-lr
607 sub sp, -4
608 retd
609
/* Returning to user space: handle reschedule/signal/single-step work */
610 debug_resume_user:
611 get_thread_info r0
612 mask_interrupts
613
614 ld.w r1, r0[TI_flags]
615 andl r1, _TIF_DBGWORK_MASK, COH
616 breq debug_restore_all
617
618 1: bld r1, TIF_NEED_RESCHED
619 brcc 2f
620 unmask_interrupts
621 rcall schedule
622 mask_interrupts
623 ld.w r1, r0[TI_flags]
624 rjmp 1b
625
626 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
627 tst r1, r2
628 breq 3f
629 unmask_interrupts
630 mov r12, sp
631 mov r11, r0
632 rcall do_notify_resume
633 mask_interrupts
634 ld.w r1, r0[TI_flags]
635 rjmp 1b
636
/* TIF_SINGLE_STEP: set the OCD single-step bit before resuming */
637 3: bld r1, TIF_SINGLE_STEP
638 brcc debug_restore_all
639 mfdr r2, OCD_DC
640 sbr r2, OCD_DC_SS_BIT
641 mtdr OCD_DC, r2
642 rjmp debug_restore_all
643
/* Lowercase aliases so the macro below can paste \level into the names */
644 .set rsr_int0, SYSREG_RSR_INT0
645 .set rsr_int1, SYSREG_RSR_INT1
646 .set rsr_int2, SYSREG_RSR_INT2
647 .set rsr_int3, SYSREG_RSR_INT3
648 .set rar_int0, SYSREG_RAR_INT0
649 .set rar_int1, SYSREG_RAR_INT1
650 .set rar_int2, SYSREG_RAR_INT2
651 .set rar_int3, SYSREG_RAR_INT3
652
/*
 * Generates the interrupt handler for one priority level: builds a
 * pt_regs frame, calls do_IRQ, then on the way out handles user-mode
 * work, idle-sleep cancellation and (with CONFIG_PREEMPT) preemption.
 */
653 .macro IRQ_LEVEL level
654 .type irq_level\level, @function
655 irq_level\level:
656 sub sp, 4 /* r12_orig */
657 stmts --sp,r0-lr
658 mfsr r8, rar_int\level
659 mfsr r9, rsr_int\level
660
661 #ifdef CONFIG_PREEMPT
/*
 * If the interrupt hit the very first instruction of system_call
 * (before it could mask interrupts), return there with the mask bit
 * set in the saved SR so the syscall entry restarts atomically (4:).
 */
662 sub r11, pc, (. - system_call)
663 cp.w r11, r8
664 breq 4f
665 #endif
666
667 pushm r8-r9
668
669 mov r11, sp
670 mov r12, \level
671
672 rcall do_IRQ
673
/* Which mode was interrupted? Supervisor may be the idle loop (2:) */
674 lddsp r4, sp[REG_SR]
675 bfextu r4, r4, SYSREG_M0_OFFSET, 3
676 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
677 breq 2f
678 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
679 #ifdef CONFIG_PREEMPT
680 brne 3f
681 #else
682 brne 1f
683 #endif
684
685 get_thread_info r0
686 ld.w r1, r0[TI_flags]
687 andl r1, _TIF_WORK_MASK, COH
688 brne irq_exit_work
689
690 1:
691 #ifdef CONFIG_TRACE_IRQFLAGS
692 rcall trace_hardirqs_on
693 #endif
694 popm r8-r9
695 mtsr rar_int\level, r8
696 mtsr rsr_int\level, r9
697 ldmts sp++,r0-lr
698 sub sp, -4 /* ignore r12_orig */
699 rete
700
701 #ifdef CONFIG_PREEMPT
/* Set bit 16 (GM, the global interrupt mask) in the saved SR so that
 * system_call resumes with interrupts masked */
702 4: mask_interrupts
703 mfsr r8, rsr_int\level
704 sbr r8, 16
705 mtsr rsr_int\level, r8
706 ldmts sp++, r0-lr
707 sub sp, -4 /* ignore r12_orig */
708 rete
709 #endif
710
/*
 * Interrupted supervisor mode: if the CPU was about to go to sleep in
 * the idle loop, redirect the return PC to cpu_idle_skip_sleep so the
 * wakeup is not lost.
 */
711 2: get_thread_info r0
712 ld.w r1, r0[TI_flags]
713 bld r1, TIF_CPU_GOING_TO_SLEEP
714 #ifdef CONFIG_PREEMPT
715 brcc 3f
716 #else
717 brcc 1b
718 #endif
719 sub r1, pc, . - cpu_idle_skip_sleep
720 stdsp sp[REG_PC], r1
721 #ifdef CONFIG_PREEMPT
722 3: get_thread_info r0
723 ld.w r2, r0[TI_preempt_count]
724 cp.w r2, 0
725 brne 1b
726 ld.w r1, r0[TI_flags]
727 bld r1, TIF_NEED_RESCHED
728 brcc 1b
729 lddsp r4, sp[REG_SR]
730 bld r4, SYSREG_GM_OFFSET
731 brcs 1b
732 rcall preempt_schedule_irq
733 #endif
734 rjmp 1b
735 .endm
736
737 .section .irq.text,"ax",@progbits
738
739 .global cpu_idle_sleep
/*
 * Sleep until an interrupt arrives.  TIF_CPU_GOING_TO_SLEEP lets the
 * IRQ handlers (see IRQ_LEVEL above) redirect the return PC to
 * cpu_idle_skip_sleep, so a wakeup that arrives between the flag set
 * and the 'sleep' instruction is not lost.
 */
740 cpu_idle_sleep:
741 mask_interrupts
742 get_thread_info r8
743 ld.w r9, r8[TI_flags]
744 bld r9, TIF_NEED_RESCHED
745 brcs cpu_idle_enable_int_and_exit
746 sbr r9, TIF_CPU_GOING_TO_SLEEP
747 st.w r8[TI_flags], r9
748 unmask_interrupts
749 sleep 0
750 cpu_idle_skip_sleep:
751 mask_interrupts
752 ld.w r9, r8[TI_flags]
753 cbr r9, TIF_CPU_GOING_TO_SLEEP
754 st.w r8[TI_flags], r9
755 cpu_idle_enable_int_and_exit:
756 unmask_interrupts
757 retal r12
758
759 .global irq_level0
760 .global irq_level1
761 .global irq_level2
762 .global irq_level3
/* Instantiate one handler per interrupt priority level */
763 IRQ_LEVEL 0
764 IRQ_LEVEL 1
765 IRQ_LEVEL 2
766 IRQ_LEVEL 3