/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern	mips_cm_base

	.set	noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

/*
 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 * MT is not supported then branch to nomt.
 */
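/*
 * Each Config register's M bit (bit 31) indicates whether the following
 * Config register is implemented, so the bgez tests below (sign bit clear)
 * bail out as soon as the register chain ends. The has_vp macro further
 * down relies on the same trick.
 */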
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

/*
 * Set dest to non-zero if the core supports MIPSr6 multithreading
 * (i.e. VPs), else zero. If MIPSr6 multithreading is not supported then
 * branch to nomt.
 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

/* Calculate an uncached address for the CM GCRs */
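/*
 * CP0 CMGCRBase holds the GCR block's physical base address shifted right
 * by 4 bits, hence the shift left by 4 before adding UNCAC_BASE to form an
 * uncached virtual address.
 */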
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

	.section .text.cps-vec
	.balign 0x1000

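/*
 * This section provides the boot exception vector region used by the CPS
 * SMP code to start secondary cores & VPs: the entry point below sits at
 * offset 0 and the .org'd handlers further down occupy the architectural
 * BEV exception vector offsets.
 */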
LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline. mips_cps_boot_vpes()
	 * takes a pointer to this core's struct core_boot_config in a0 and
	 * this VPE's ID in a1.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)

	.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

	.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

	.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

	.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

	.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

	.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)

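/*
 * mips_cps_core_init() - perform one-time initialisation for the core. With
 * CONFIG_MIPS_MT_SMP this places every other VPE in the core into a halted,
 * non-active state ready to be started by mips_cps_boot_vpes().
 */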
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif /* CONFIG_MIPS_MT_SMP */
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

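/*
 * mips_cps_boot_vpes() - start any other VPEs within this core that ought to
 * be online, and stop this one if it shouldn't be running.
 * a0: pointer to this core's struct core_boot_config
 * a1: the ID of the VPE executing this code
 */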
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * Config3 must exist on any CPU running this MT startup code - just
	 * read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:	.set	pop

#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to either of the TagLo or TagHi registers with
	 * select 0 or 2 should be valid for all MIPS32 CPUs, even those for
	 * which said writes are unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

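	/*
	 * With the tag registers zeroed, an Index Store Tag to every line
	 * leaves the cache in a clean, invalid state.
	 */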
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */