/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, stack is switched to kernel mode.
 *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
 *  its prologue including stack switching from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context shd be cleared when entering IRQ/Excp/Trap
 *   Normally CPU does this automatically, however when doing FAKE rtie,
 *   we also need to explicitly do this. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load ld.ab instead of separate ld/add instns
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
30 | ||
31 | #ifndef __ASM_ARC_ENTRY_H | |
32 | #define __ASM_ARC_ENTRY_H | |
33 | ||
34 | #ifdef __ASSEMBLY__ | |
35 | #include <asm/unistd.h> /* For NR_syscalls defination */ | |
36 | #include <asm/asm-offsets.h> | |
37 | #include <asm/arcregs.h> | |
38 | #include <asm/ptrace.h> | |
080c3747 | 39 | #include <asm/processor.h> /* For VMALLOC_START */ |
9d42c84f | 40 | #include <asm/thread_info.h> /* For THREAD_SIZE */ |
4ffd9e2c | 41 | #include <asm/mmu.h> |
/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a  reg1, [reg2, x]  => Pre Incr
 *       Eff Addr for load = [reg2 + x]
 *
 * LD.ab reg1, [reg2, x]  => Post Incr
 *       Eff Addr for load = [reg2]
 */
53 | ||
/*--------------------------------------------------------------
 * Stack push/pop primitives (ARC stack grows downwards).
 *
 * PUSH/POP     : core register  <-> stack
 * PUSHAX/POPAX : auxiliary reg  <-> stack, staged through r9
 *
 * st.a = store with pre-decrement writeback, ld.ab = load with
 * post-increment writeback, so SP always points at the last item
 * pushed (see LD/ST addressing-mode note above).
 *
 * NOTE: PUSHAX/POPAX clobber r9.
 *-------------------------------------------------------------*/
.macro PUSH reg
        st.a    \reg, [sp, -4]
.endm

.macro PUSHAX aux
        lr      r9, [\aux]
        PUSH    r9
.endm

.macro POP reg
        ld.ab   \reg, [sp, 4]
.endm

.macro POPAX aux
        POP     r9
        sr      r9, [\aux]
.endm
71 | ||
/*--------------------------------------------------------------
 * Helpers to save/restore Scratch Regs:
 * used by Interrupt/Exception Prologue/Epilogue
 *-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
        PUSH    r0
        PUSH    r1
        PUSH    r2
        PUSH    r3
        PUSH    r4
        PUSH    r5
        PUSH    r6
        PUSH    r7
        PUSH    r8
        PUSH    r9
        PUSH    r10
        PUSH    r11
        PUSH    r12
.endm

/* Pops in exact reverse order of SAVE_R0_TO_R12 */
.macro RESTORE_R12_TO_R0
        POP     r12
        POP     r11
        POP     r10
        POP     r9
        POP     r8
        POP     r7
        POP     r6
        POP     r5
        POP     r4
        POP     r3
        POP     r2
        POP     r1
        POP     r0

#ifdef CONFIG_ARC_CURR_IN_REG
        /* r25 caches "current" task ptr; reload it from its slot in the
         * frame (sp + 12 at this point of unwind) before returning out
         * of kernel — presumably the user_r25 save slot, per the frame
         * layout set up at entry (see SWITCH_TO_KERNEL_STK) */
        ld      r25, [sp, 12]
#endif
.endm
111 | ||
/*--------------------------------------------------------------
 * Helpers to save/restore callee-saved regs:
 * used by several macros below
 *-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
        PUSH    r13
        PUSH    r14
        PUSH    r15
        PUSH    r16
        PUSH    r17
        PUSH    r18
        PUSH    r19
        PUSH    r20
        PUSH    r21
        PUSH    r22
        PUSH    r23
        PUSH    r24
.endm

/* Pops in exact reverse order of SAVE_R13_TO_R24 */
.macro RESTORE_R24_TO_R13
        POP     r24
        POP     r23
        POP     r22
        POP     r21
        POP     r20
        POP     r19
        POP     r18
        POP     r17
        POP     r16
        POP     r15
        POP     r14
        POP     r13
.endm
145 | ||
/* Word offset (scaled, for ld.as/st.as) from the bottom of the callee-regs
 * save area up to the user_r25 slot near the top of pt_regs */
#define OFF_USER_R25_FROM_R24   (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4

/*--------------------------------------------------------------
 * Collect User Mode callee regs as struct callee_regs - needed by
 * fork/do_signal/unaligned-access-emulation.
 * (By default only scratch regs are saved on entry to kernel)
 *
 * Special handling for r25 if used for caching Task Pointer.
 * It would have been saved in task->thread.user_r25 already, but to keep
 * the interface same it is copied into regular r25 placeholder in
 * struct callee_regs.
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER

        SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
        ; Retrieve orig r25 (stashed in pt_regs at entry) and save it on
        ; stack in the r25 slot of callee_regs, via r12 as scratch
        ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
        st.a    r12, [sp, -4]
#else
        PUSH    r25
#endif

.endm
171 | ||
/*--------------------------------------------------------------
 * Save kernel Mode callee regs at the time of Context Switch.
 *
 * Special handling for r25 if used for caching Task Pointer.
 * Kernel simply skips saving it since it will be loaded with
 * incoming task pointer anyways
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL

        SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
        /* keep the r25 slot in the frame for layout uniformity,
         * but don't bother actually saving r25 into it */
        sub     sp, sp, 4
#else
        PUSH    r25
#endif
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_KERNEL
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
        add     sp, sp, 4   /* skip usual r25 placeholder */
#else
        POP     r25
#endif
        RESTORE_R24_TO_R13
.endm
202 | ||
/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_USER
 *
 * ptrace tracer or unaligned-access fixup might have changed a user mode
 * callee reg which is saved back to usual r25 storage location
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

#ifdef CONFIG_ARC_CURR_IN_REG
        /* pop the (possibly updated) user r25 copy from callee_regs and
         * write it back to its usual home, the user_r25 slot in pt_regs
         * (scaled store, mirrors the load in SAVE_CALLEE_SAVED_USER) */
        ld.ab   r12, [sp, 4]
        st.as   r12, [sp, OFF_USER_R25_FROM_R24]
#else
        POP     r25
#endif
        RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Super FAST Restore callee saved regs by simply re-adjusting SP
 * (values are discarded, not reloaded into registers)
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
        add     sp, sp, SZ_CALLEE_REGS
.endm
226 | ||
/*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE, whose bottom hoists stack
 * which grows upwards towards thread_info
 *------------------------------------------------------------*/

.macro GET_TSK_STACK_BASE tsk, out

        /* Get task->thread_info (this is essentially start of a PAGE) */
        ld  \out, [\tsk, TASK_THREAD_INFO]

        /* Go to end of page where stack begins (grows upwards);
         * add2 scales its operand by 4, hence THREAD_SIZE/4 */
        add2 \out, \out, (THREAD_SIZE)/4

.endm
242 | ||
/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry : r9 contains pre-IRQ/exception/trap status32
 * Exit  : SP is set to kernel mode stack pointer
 *         If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

        /* User Mode when this happened ? Yes: Proceed to switch stack */
        bbit1   r9, STATUS_U_BIT, 88f

        /* OK we were already in kernel mode when this event happened, thus can
         * assume SP is kernel mode SP. _NO_ need to do any stack switching
         */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
        /* However....
         * If Level 2 Interrupts enabled, we may end up with a corner case:
         * 1. User Task executing
         * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
         * 3. But before it could switch SP from USER to KERNEL stack
         *    a L2 IRQ "Interrupts" L1
         * That way although L2 IRQ happened in Kernel mode, stack is still
         * not switched.
         * To handle this, we may need to switch stack even if in kernel mode
         * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
         */
        brlo sp, VMALLOC_START, 88f

        /* TODO: vineetg:
         * We need to be a bit more cautious here. What if a kernel bug in
         * L1 ISR, caused SP to go whacko (some small value which looks like
         * USER stk) and then we take L2 ISR.
         * Above brlo alone would treat it as a valid L1-L2 scenario
         * instead of shouting aloud.
         * The only feasible way is to make sure this L2 happened in
         * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
         * L1 ISR before it switches stack
         */

#endif

        /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
         * safe-keeping not really needed, but it keeps the epilogue code
         * (SP restore) simpler/uniform.
         * (mov executes in the branch delay slot of b.d)
         */
        b.d     66f
        mov     r9, sp

88: /*------Intr/Excp happened in user mode, "switch" stack ------ */

        GET_CURR_TASK_ON_CPU   r9

        /* With current tsk in r9, get its kernel mode stack base */
        GET_TSK_STACK_BASE  r9, r9

66:
#ifdef CONFIG_ARC_CURR_IN_REG
        /*
         * Treat r25 as scratch reg, save it on stack first
         * Load it with current task pointer
         */
        st      r25, [r9, -4]
        GET_CURR_TASK_ON_CPU   r25
#endif

        /* Save Pre Intr/Exception User SP on kernel stack */
        st.a    sp, [r9, -16]   ; Make room for orig_r0, ECR, user_r25

        /* CAUTION:
         * SP should be set at the very end when we are done with everything
         * In case of 2 levels of interrupt we depend on value of SP to assume
         * that everything else is done (loading r25 etc)
         */

        /* set SP to point to kernel mode stack */
        mov sp, r9

        /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */

.endm
327 | ||
/*------------------------------------------------------------
 * "FAKE" a rtie to return from CPU Exception context
 * This is to re-enable Exceptions within exception
 * Look at EV_ProtV to see how this is actually used
 *
 * Clobbers: \reg (and erstatus/eret aux regs by design)
 *-------------------------------------------------------------*/

.macro FAKE_RET_FROM_EXCPN  reg

        /* Build a status32 for the "return": stay in kernel mode (clear U),
         * no delay-slot state (clear DE); setting L also clears any live
         * Zero-Overhead-Loop context (see Bug #94984 note in file header) */
        ld   \reg, [sp, PT_status32]
        bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
        bset \reg, \reg, STATUS_L_BIT
        sr   \reg, [erstatus]

        /* "return" address of the fake rtie is simply the next insn */
        mov  \reg, 55f
        sr   \reg, [eret]

        rtie
55:
.endm
346 | ||
/*
 * @reg [OUT] &thread_info of "current"
 *
 * Kernel stack and thread_info live in one THREAD_SIZE-aligned region,
 * so masking off the low bits of SP yields the thread_info base.
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
        bic \reg, sp, (THREAD_SIZE - 1)
.endm

/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
        GET_CURR_THR_INFO_FROM_SP  \reg
        ld  \reg, [\reg, THREAD_INFO_FLAGS]
.endm
361 | ||
/*--------------------------------------------------------------
 * For early Exception/ISR Prologue, a core reg is temporarily needed to
 * code the rest of prolog (stack switching). This is done by stashing
 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on kernel mode stack, as part of pt_regs.
 * (\mem is unused in the SMP variant, and the aux reg unused in UP)
 *-------------------------------------------------------------*/
.macro PROLOG_FREEUP_REG        reg, mem
#ifdef CONFIG_SMP
        sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
        st  \reg, [\mem]
#endif
.endm

/* Opposite of PROLOG_FREEUP_REG: bring the stashed reg back */
.macro PROLOG_RESTORE_REG       reg, mem
#ifdef CONFIG_SMP
        lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
        ld  \reg, [\mem]
#endif
.endm
385 | ||
/*--------------------------------------------------------------
 * Exception Entry prologue
 * -Switches stack to K mode (if not already)
 * -Saves the register file
 *
 * After this it is safe to call the "C" handlers
 *-------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE

        /* Need at least 1 reg to code the early exception prologue */
        PROLOG_FREEUP_REG r9, @ex_saved_reg1

        /* U/K mode at time of exception (stack not switched if already K);
         * SWITCH_TO_KERNEL_STK expects this in r9 */
        lr  r9, [erstatus]

        /* ARC700 doesn't provide auto-stack switching */
        SWITCH_TO_KERNEL_STK

        /* save the regfile (also restores r9 from @ex_saved_reg1) */
        SAVE_ALL_SYS
.endm
407 | ||
/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct ptregs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP which is also an exception
 * from CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS

        /* Fill the slots pre-reserved by SWITCH_TO_KERNEL_STK */
        lr      r9, [ecr]
        st      r9, [sp, 8]    /* ECR */
        st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */

        /* Restore r9 used to code the early prologue */
        PROLOG_RESTORE_REG  r9, @ex_saved_reg1

        SAVE_R0_TO_R12
        PUSH    gp
        PUSH    fp
        PUSH    blink
        PUSHAX  eret
        PUSHAX  erstatus
        PUSH    lp_count
        PUSHAX  lp_end
        PUSHAX  lp_start
        PUSHAX  erbta
.endm
437 | ||
/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS
        POPAX   erbta
        POPAX   lp_start
        POPAX   lp_end

        POP     r9
        mov     lp_count, r9    ;LD to lp_count is not allowed

        POPAX   erstatus
        POPAX   eret
        POP     blink
        POP     fp
        POP     gp
        RESTORE_R12_TO_R0

        /* pre-event SP was saved at the bottom of the frame at entry */
        ld      sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
.endm
467 | ||
/* Dummy ECR values for Interrupts (real ECR exists only for exceptions) */
#define event_IRQ1      0x0031abcd
#define event_IRQ2      0x0032abcd

/*--------------------------------------------------------------
 * Interrupt entry prologue for level \LVL (1 or 2):
 * switches to kernel stack (if needed) and saves the regfile.
 * Mirrors EXCEPTION_PROLOGUE/SAVE_ALL_SYS but uses the per-level
 * ilink/status32_l/bta_l registers via \LVL\() token pasting.
 *-------------------------------------------------------------*/
.macro INTERRUPT_PROLOGUE  LVL

        /* free up r9 as scratchpad */
        PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg

        /* Which mode (user/kernel) was the system in when intr occurred */
        lr  r9, [status32_l\LVL\()]

        SWITCH_TO_KERNEL_STK

        /* restore original r9 */
        PROLOG_RESTORE_REG r9, @int\LVL\()_saved_reg

        /* now we are ready to save the remaining context */
        st      0x003\LVL\()abcd, [sp, 8]    /* Dummy ECR (== event_IRQ\LVL) */
        st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */

        SAVE_R0_TO_R12
        PUSH    gp
        PUSH    fp
        PUSH    blink
        PUSH    ilink\LVL\()
        PUSHAX  status32_l\LVL\()
        PUSH    lp_count
        PUSHAX  lp_end
        PUSHAX  lp_start
        PUSHAX  bta_l\LVL\()
.endm
500 | ||
/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE  LVL
        POPAX   bta_l\LVL\()
        POPAX   lp_start
        POPAX   lp_end

        POP     r9
        mov     lp_count, r9    ;LD to lp_count is not allowed

        POPAX   status32_l\LVL\()
        POP     ilink\LVL\()
        POP     blink
        POP     fp
        POP     gp
        RESTORE_R12_TO_R0

        /* pre-event SP was saved at the bottom of the frame at entry */
        ld      sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
.endm

/* Get CPU-ID of this core: bits [15:8] of the IDENTITY aux register */
.macro  GET_CPU_ID  reg
        lr   \reg, [identity]
        lsr  \reg, \reg, 8
        bmsk \reg, \reg, 7
.endm
535 | ||
#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro  GET_CURR_TASK_ON_CPU   reg
        GET_CPU_ID  \reg
        ld.as  \reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */
.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
        GET_CPU_ID  \tmp
        add2 \tmp, @_current_task, \tmp
        st   \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
        /* keep the r25 cache of "current" coherent with the array */
        mov r25, \tsk
#endif

.endm


#else   /* Uniprocessor implementation of macros */

.macro  GET_CURR_TASK_ON_CPU    reg
        ld  \reg, [@_current_task]
.endm

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
        st  \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
        /* keep the r25 cache of "current" coherent with the variable */
        mov r25, \tsk
#endif
.endm

#endif /* SMP / UNI */
583 | ||
/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 * -Uses r25 for Current task ptr if that is enabled
 */

#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off, reg
        add \reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off, reg
        GET_CURR_TASK_ON_CPU  \reg
        add \reg, \reg, \off
.endm

#endif  /* CONFIG_ARC_CURR_IN_REG */
603 | ||
9d42c84f VG |
604 | #endif /* __ASSEMBLY__ */ |
605 | ||
606 | #endif /* __ASM_ARC_ENTRY_H */ |