/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>
1da177e4 LT |
24 | __INIT |
25 | ||
1da177e4 LT |
26 | /* |
27 | * General exception vector for all other CPUs. | |
28 | * | |
29 | * Be careful when changing this, it has to be at most 128 bytes | |
30 | * to fit into space reserved for the exception handler. | |
31 | */ | |
32 | NESTED(except_vec3_generic, 0, sp) | |
33 | .set push | |
34 | .set noat | |
35 | #if R5432_CP0_INTERRUPT_WAR | |
36 | mfc0 k0, CP0_INDEX | |
37 | #endif | |
38 | mfc0 k1, CP0_CAUSE | |
39 | andi k1, k1, 0x7c | |
875d43e7 | 40 | #ifdef CONFIG_64BIT |
1da177e4 LT |
41 | dsll k1, k1, 1 |
42 | #endif | |
43 | PTR_L k0, exception_handlers(k1) | |
44 | jr k0 | |
45 | .set pop | |
46 | END(except_vec3_generic) | |
47 | ||
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31 = VCED (data)
	andi	k1, k1, 0x7c		# k1 = Cause.ExcCode << 2
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		# delay slot: ExcCode 14 = VCEI (insn)
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1		# delay slot: 8-byte table entries
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)			# invalidate primary D line
	cache	Hit_Writeback_Inv_SD, (k0)		# flush secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
107 | ||
e4ac58af RB |
108 | __FINIT |
109 | ||
c65a5480 | 110 | .align 5 /* 32 byte rollback region */ |
087d990b | 111 | LEAF(__r4k_wait) |
c65a5480 AN |
112 | .set push |
113 | .set noreorder | |
114 | /* start of rollback region */ | |
115 | LONG_L t0, TI_FLAGS($28) | |
116 | nop | |
117 | andi t0, _TIF_NEED_RESCHED | |
118 | bnez t0, 1f | |
119 | nop | |
120 | nop | |
121 | nop | |
2a0b24f5 SH |
122 | #ifdef CONFIG_CPU_MICROMIPS |
123 | nop | |
124 | nop | |
125 | nop | |
126 | nop | |
127 | #endif | |
938c1282 | 128 | .set MIPS_ISA_ARCH_LEVEL_RAW |
c65a5480 AN |
129 | wait |
130 | /* end of rollback region (the region size must be power of two) */ | |
c65a5480 AN |
131 | 1: |
132 | jr ra | |
105c22c5 | 133 | nop |
2a0b24f5 | 134 | .set pop |
087d990b | 135 | END(__r4k_wait) |
c65a5480 AN |
136 | |
137 | .macro BUILD_ROLLBACK_PROLOGUE handler | |
138 | FEXPORT(rollback_\handler) | |
139 | .set push | |
140 | .set noat | |
141 | MFC0 k0, CP0_EPC | |
087d990b | 142 | PTR_LA k1, __r4k_wait |
c65a5480 AN |
143 | ori k0, 0x1f /* 32 byte rollback region */ |
144 | xori k0, 0x1f | |
1eefcbc8 | 145 | bne k0, k1, \handler |
c65a5480 | 146 | MTC0 k0, CP0_EPC |
c65a5480 AN |
147 | .set pop |
148 | .endm | |
149 | ||
70342287 | 150 | .align 5 |
c65a5480 | 151 | BUILD_ROLLBACK_PROLOGUE handle_int |
e4ac58af | 152 | NESTED(handle_int, PT_SIZE, sp) |
fe99f1b1 CD |
153 | #ifdef CONFIG_TRACE_IRQFLAGS |
154 | /* | |
155 | * Check to see if the interrupted code has just disabled | |
156 | * interrupts and ignore this interrupt for now if so. | |
157 | * | |
158 | * local_irq_disable() disables interrupts and then calls | |
159 | * trace_hardirqs_off() to track the state. If an interrupt is taken | |
160 | * after interrupts are disabled but before the state is updated | |
161 | * it will appear to restore_all that it is incorrectly returning with | |
162 | * interrupts disabled | |
163 | */ | |
164 | .set push | |
165 | .set noat | |
166 | mfc0 k0, CP0_STATUS | |
167 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | |
168 | and k0, ST0_IEP | |
169 | bnez k0, 1f | |
170 | ||
c6563e85 | 171 | mfc0 k0, CP0_EPC |
fe99f1b1 CD |
172 | .set noreorder |
173 | j k0 | |
105c22c5 | 174 | rfe |
fe99f1b1 CD |
175 | #else |
176 | and k0, ST0_IE | |
177 | bnez k0, 1f | |
178 | ||
179 | eret | |
180 | #endif | |
181 | 1: | |
182 | .set pop | |
183 | #endif | |
e4ac58af RB |
184 | SAVE_ALL |
185 | CLI | |
192ef366 | 186 | TRACE_IRQS_OFF |
e4ac58af | 187 | |
937a8015 RB |
188 | LONG_L s0, TI_REGS($28) |
189 | LONG_S sp, TI_REGS($28) | |
dda45f70 MR |
190 | |
191 | /* | |
192 | * SAVE_ALL ensures we are using a valid kernel stack for the thread. | |
193 | * Check if we are already using the IRQ stack. | |
194 | */ | |
195 | move s1, sp # Preserve the sp | |
196 | ||
197 | /* Get IRQ stack for this CPU */ | |
198 | ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG | |
199 | #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) | |
200 | lui k1, %hi(irq_stack) | |
201 | #else | |
202 | lui k1, %highest(irq_stack) | |
203 | daddiu k1, %higher(irq_stack) | |
204 | dsll k1, 16 | |
205 | daddiu k1, %hi(irq_stack) | |
206 | dsll k1, 16 | |
207 | #endif | |
208 | LONG_SRL k0, SMP_CPUID_PTRSHIFT | |
209 | LONG_ADDU k1, k0 | |
210 | LONG_L t0, %lo(irq_stack)(k1) | |
211 | ||
212 | # Check if already on IRQ stack | |
213 | PTR_LI t1, ~(_THREAD_SIZE-1) | |
214 | and t1, t1, sp | |
215 | beq t0, t1, 2f | |
216 | ||
217 | /* Switch to IRQ stack */ | |
db8466c5 | 218 | li t1, _IRQ_STACK_START |
dda45f70 MR |
219 | PTR_ADD sp, t0, t1 |
220 | ||
db8466c5 MR |
221 | /* Save task's sp on IRQ stack so that unwinding can follow it */ |
222 | LONG_S s1, 0(sp) | |
dda45f70 MR |
223 | 2: |
224 | jal plat_irq_dispatch | |
225 | ||
226 | /* Restore sp */ | |
227 | move sp, s1 | |
228 | ||
229 | j ret_from_irq | |
2a0b24f5 SH |
230 | #ifdef CONFIG_CPU_MICROMIPS |
231 | nop | |
232 | #endif | |
e4ac58af RB |
233 | END(handle_int) |
234 | ||
235 | __INIT | |
236 | ||
1da177e4 LT |
237 | /* |
238 | * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. | |
239 | * This is a dedicated interrupt exception vector which reduces the | |
240 | * interrupt processing overhead. The jump instruction will be replaced | |
241 | * at the initialization time. | |
242 | * | |
243 | * Be careful when changing this, it has to be at most 128 bytes | |
244 | * to fit into space reserved for the exception handler. | |
245 | */ | |
246 | NESTED(except_vec4, 0, sp) | |
247 | 1: j 1b /* Dummy, will be replaced */ | |
248 | END(except_vec4) | |
249 | ||
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT
264 | ||
/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	/* lui/ori pair below is rewritten at runtime to form the
	 * per-vector handler argument passed in v0. */
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
285 | ||
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF may clobber v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp			# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0			# invoke the patched-in handler

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)
343 | ||
/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the debug save register
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30		# Check for SDBBP.
	bgez	k0, ejtag_return

	/* k1 has no save register; park it in ejtag_debug_buffer */
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				# return from debug mode
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous
381 | __INIT | |
382 | ||
383 | /* | |
384 | * NMI debug exception handler for MIPS reference boards. | |
385 | * The NMI debug exception entry point is 0xbfc00000, which | |
386 | * normally is in the boot PROM, so the boot PROM must do a | |
387 | * unconditional jump to this vector. | |
388 | */ | |
389 | NESTED(except_vec_nmi, 0, sp) | |
390 | j nmi_handler | |
2a0b24f5 SH |
391 | #ifdef CONFIG_CPU_MICROMIPS |
392 | nop | |
393 | #endif | |
1da177e4 LT |
394 | END(except_vec_nmi) |
395 | ||
396 | __FINIT | |
397 | ||
398 | NESTED(nmi_handler, PT_SIZE, sp) | |
399 | .set push | |
400 | .set noat | |
83e4da1e LY |
401 | /* |
402 | * Clear ERL - restore segment mapping | |
403 | * Clear BEV - required for page fault exception handler to work | |
404 | */ | |
405 | mfc0 k0, CP0_STATUS | |
105c22c5 | 406 | ori k0, k0, ST0_EXL |
83e4da1e | 407 | li k1, ~(ST0_BEV | ST0_ERL) |
105c22c5 JH |
408 | and k0, k0, k1 |
409 | mtc0 k0, CP0_STATUS | |
83e4da1e | 410 | _ehb |
1da177e4 | 411 | SAVE_ALL |
70342287 | 412 | move a0, sp |
1da177e4 | 413 | jal nmi_exception_handler |
83e4da1e | 414 | /* nmi_exception_handler never returns */ |
1da177e4 LT |
415 | .set pop |
416 | END(nmi_handler) | |
417 | ||
418 | .macro __build_clear_none | |
419 | .endm | |
420 | ||
421 | .macro __build_clear_sti | |
192ef366 | 422 | TRACE_IRQS_ON |
1da177e4 LT |
423 | STI |
424 | .endm | |
425 | ||
426 | .macro __build_clear_cli | |
427 | CLI | |
192ef366 | 428 | TRACE_IRQS_OFF |
1da177e4 LT |
429 | .endm |
430 | ||
431 | .macro __build_clear_fpe | |
25c30003 DD |
432 | .set push |
433 | /* gas fails to assemble cfc1 for some archs (octeon).*/ \ | |
434 | .set mips1 | |
842dfc11 | 435 | SET_HARDFLOAT |
1da177e4 | 436 | cfc1 a1, fcr31 |
25c30003 | 437 | .set pop |
64bedffe JH |
438 | CLI |
439 | TRACE_IRQS_OFF | |
1da177e4 LT |
440 | .endm |
441 | ||
091be550 PB |
442 | .macro __build_clear_msa_fpe |
443 | _cfcmsa a1, MSA_CSR | |
64bedffe JH |
444 | CLI |
445 | TRACE_IRQS_OFF | |
091be550 PB |
446 | .endm |
447 | ||
1da177e4 LT |
448 | .macro __build_clear_ade |
449 | MFC0 t0, CP0_BADVADDR | |
450 | PTR_S t0, PT_BVADDR(sp) | |
451 | KMODE | |
452 | .endm | |
453 | ||
454 | .macro __BUILD_silent exception | |
455 | .endm | |
456 | ||
457 | /* Gas tries to parse the PRINT argument as a string containing | |
458 | string escapes and emits bogus warnings if it believes to | |
459 | recognize an unknown escape code. So make the arguments | |
460 | start with an n and gas will believe \n is ok ... */ | |
70342287 | 461 | .macro __BUILD_verbose nexception |
1da177e4 | 462 | LONG_L a1, PT_EPC(sp) |
766160c2 | 463 | #ifdef CONFIG_32BIT |
1da177e4 | 464 | PRINT("Got \nexception at %08lx\012") |
42a3b4f2 | 465 | #endif |
766160c2 | 466 | #ifdef CONFIG_64BIT |
1da177e4 | 467 | PRINT("Got \nexception at %016lx\012") |
42a3b4f2 | 468 | #endif |
1da177e4 LT |
469 | .endm |
470 | ||
471 | .macro __BUILD_count exception | |
472 | LONG_L t0,exception_count_\exception | |
105c22c5 | 473 | LONG_ADDIU t0, 1 |
1da177e4 LT |
474 | LONG_S t0,exception_count_\exception |
475 | .comm exception_count\exception, 8, 8 | |
476 | .endm | |
477 | ||
478 | .macro __BUILD_HANDLER exception handler clear verbose ext | |
479 | .align 5 | |
480 | NESTED(handle_\exception, PT_SIZE, sp) | |
481 | .set noat | |
482 | SAVE_ALL | |
483 | FEXPORT(handle_\exception\ext) | |
158d3b2a | 484 | __build_clear_\clear |
1da177e4 LT |
485 | .set at |
486 | __BUILD_\verbose \exception | |
487 | move a0, sp | |
23126692 AN |
488 | PTR_LA ra, ret_from_exception |
489 | j do_\handler | |
1da177e4 LT |
490 | END(handle_\exception) |
491 | .endm | |
492 | ||
493 | .macro BUILD_HANDLER exception handler clear verbose | |
70342287 | 494 | __BUILD_HANDLER \exception \handler \clear \verbose _int |
1da177e4 LT |
495 | .endm |
496 | ||
497 | BUILD_HANDLER adel ade ade silent /* #4 */ | |
498 | BUILD_HANDLER ades ade ade silent /* #5 */ | |
499 | BUILD_HANDLER ibe be cli silent /* #6 */ | |
500 | BUILD_HANDLER dbe be cli silent /* #7 */ | |
501 | BUILD_HANDLER bp bp sti silent /* #9 */ | |
502 | BUILD_HANDLER ri ri sti silent /* #10 */ | |
503 | BUILD_HANDLER cpu cpu sti silent /* #11 */ | |
504 | BUILD_HANDLER ov ov sti silent /* #12 */ | |
505 | BUILD_HANDLER tr tr sti silent /* #13 */ | |
091be550 | 506 | BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ |
1da177e4 | 507 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ |
75b5b5e0 | 508 | BUILD_HANDLER ftlb ftlb none silent /* #16 */ |
1db1af84 | 509 | BUILD_HANDLER msa msa sti silent /* #21 */ |
1da177e4 | 510 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ |
70342287 | 511 | #ifdef CONFIG_HARDWARE_WATCHPOINTS |
8bc6d05b DD |
512 | /* |
513 | * For watch, interrupts will be enabled after the watch | |
514 | * registers are read. | |
515 | */ | |
516 | BUILD_HANDLER watch watch cli silent /* #23 */ | |
b67b2b70 | 517 | #else |
1da177e4 | 518 | BUILD_HANDLER watch watch sti verbose /* #23 */ |
b67b2b70 | 519 | #endif |
1da177e4 | 520 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ |
e35a5e35 | 521 | BUILD_HANDLER mt mt sti silent /* #25 */ |
e50c0a8f | 522 | BUILD_HANDLER dsp dsp sti silent /* #26 */ |
1da177e4 LT |
523 | BUILD_HANDLER reserved reserved sti verbose /* others */ |
524 | ||
5b10496b | 525 | .align 5 |
5a341331 | 526 | LEAF(handle_ri_rdhwr_tlbp) |
5b10496b AN |
527 | .set push |
528 | .set noat | |
529 | .set noreorder | |
530 | /* check if TLB contains a entry for EPC */ | |
531 | MFC0 k1, CP0_ENTRYHI | |
2db003a5 | 532 | andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX |
5b10496b | 533 | MFC0 k0, CP0_EPC |
105c22c5 JH |
534 | PTR_SRL k0, _PAGE_SHIFT + 1 |
535 | PTR_SLL k0, _PAGE_SHIFT + 1 | |
5b10496b AN |
536 | or k1, k0 |
537 | MTC0 k1, CP0_ENTRYHI | |
538 | mtc0_tlbw_hazard | |
539 | tlbp | |
540 | tlb_probe_hazard | |
541 | mfc0 k1, CP0_INDEX | |
542 | .set pop | |
543 | bltz k1, handle_ri /* slow path */ | |
544 | /* fall thru */ | |
5a341331 | 545 | END(handle_ri_rdhwr_tlbp) |
5b10496b AN |
546 | |
547 | LEAF(handle_ri_rdhwr) | |
548 | .set push | |
549 | .set noat | |
550 | .set noreorder | |
2a0b24f5 SH |
551 | /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ |
552 | /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ | |
5b10496b | 553 | MFC0 k1, CP0_EPC |
2a0b24f5 | 554 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) |
105c22c5 JH |
555 | and k0, k1, 1 |
556 | beqz k0, 1f | |
557 | xor k1, k0 | |
558 | lhu k0, (k1) | |
559 | lhu k1, 2(k1) | |
560 | ins k1, k0, 16, 16 | |
561 | lui k0, 0x007d | |
562 | b docheck | |
563 | ori k0, 0x6b3c | |
2a0b24f5 | 564 | 1: |
105c22c5 JH |
565 | lui k0, 0x7c03 |
566 | lw k1, (k1) | |
567 | ori k0, 0xe83b | |
2a0b24f5 | 568 | #else |
105c22c5 JH |
569 | andi k0, k1, 1 |
570 | bnez k0, handle_ri | |
571 | lui k0, 0x7c03 | |
572 | lw k1, (k1) | |
573 | ori k0, 0xe83b | |
2a0b24f5 | 574 | #endif |
105c22c5 | 575 | .set reorder |
2a0b24f5 | 576 | docheck: |
5b10496b | 577 | bne k0, k1, handle_ri /* if not ours */ |
2a0b24f5 SH |
578 | |
579 | isrdhwr: | |
5b10496b AN |
580 | /* The insn is rdhwr. No need to check CAUSE.BD here. */ |
581 | get_saved_sp /* k1 := current_thread_info */ | |
582 | .set noreorder | |
583 | MFC0 k0, CP0_EPC | |
584 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | |
585 | ori k1, _THREAD_MASK | |
586 | xori k1, _THREAD_MASK | |
587 | LONG_L v1, TI_TP_VALUE(k1) | |
588 | LONG_ADDIU k0, 4 | |
589 | jr k0 | |
590 | rfe | |
591 | #else | |
619b6e18 | 592 | #ifndef CONFIG_CPU_DADDI_WORKAROUNDS |
5b10496b | 593 | LONG_ADDIU k0, 4 /* stall on $k0 */ |
619b6e18 MR |
594 | #else |
595 | .set at=v1 | |
596 | LONG_ADDIU k0, 4 | |
597 | .set noat | |
598 | #endif | |
5b10496b AN |
599 | MTC0 k0, CP0_EPC |
600 | /* I hope three instructions between MTC0 and ERET are enough... */ | |
601 | ori k1, _THREAD_MASK | |
602 | xori k1, _THREAD_MASK | |
603 | LONG_L v1, TI_TP_VALUE(k1) | |
a809d460 | 604 | .set arch=r4000 |
5b10496b AN |
605 | eret |
606 | .set mips0 | |
607 | #endif | |
608 | .set pop | |
609 | END(handle_ri_rdhwr) | |
610 | ||
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif