]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/mips/netlogic/common/reset.S
701c4bcb9e4777c031e2af542c4abc89e6d6a9f7
[mirror_ubuntu-artful-kernel.git] / arch / mips / netlogic / common / reset.S
1 /*
2 * Copyright 2003-2013 Broadcom Corporation.
3 * All Rights Reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35
36 #include <asm/asm.h>
37 #include <asm/asm-offsets.h>
38 #include <asm/cpu.h>
39 #include <asm/cacheops.h>
40 #include <asm/regdef.h>
41 #include <asm/mipsregs.h>
42 #include <asm/stackframe.h>
43 #include <asm/asmmacro.h>
44 #include <asm/addrspace.h>
45
46 #include <asm/netlogic/common.h>
47
48 #include <asm/netlogic/xlp-hal/iomap.h>
49 #include <asm/netlogic/xlp-hal/xlp.h>
50 #include <asm/netlogic/xlp-hal/sys.h>
51 #include <asm/netlogic/xlp-hal/cpucontrol.h>
52
53 #define CP0_EBASE $15
54 #define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
55 XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
56 SYS_CPU_NONCOHERENT_MODE * 4
57
58 /*
 * Enable XLP features and workarounds in the LSU (and related units)
 * through the processor-internal control registers (mfcr/mtcr).
 * Clobbers t0, t1, t2. Safe to run on every hardware thread.
 */
59 .macro xlp_config_lsu
60 li t0, LSU_DEFEATURE
61 mfcr t1, t0 /* t1 <- current LSU defeature bits */
62
63 lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
64 or t1, t1, t2 /* OR the feature bits in, keep the rest */
65 mtcr t1, t0
66
67 li t0, ICU_DEFEATURE
68 mfcr t1, t0 /* t1 <- current ICU defeature bits */
69 ori t1, 0x1000 /* Enable Icache partitioning */
70 mtcr t1, t0
71
72 li t0, SCHED_DEFEATURE
73 lui t1, 0x0100 /* Disable BRU accepting ALU ops */
74 mtcr t1, t0 /* note: write only, previous value discarded */
75 .endm
76
77 /*
78 * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
79 * register. This is needed before going to C code since the SP can
80 * be in this region. Called from all HW threads.
81 */
82 .macro xlp_early_mmu_init
 /* CP0 register 5, select 1 is PAGEGRAIN; read-modify-write ELPA */
83 mfc0 t0, CP0_PAGEMASK, 1
84 li t1, (1 << 29) /* ELPA bit */
85 or t0, t1
86 mtc0 t0, CP0_PAGEMASK, 1
87 .endm
88
89 /*
90 * L1D cache has to be flushed before enabling threads in XLP.
91 * On XLP8xx/XLP3xx, we do a low level flush using processor control
92 * registers. On XLPII CPUs, usual cache instructions work.
93 */
94 .macro xlp_flush_l1_dcache
 /* Read processor id and pick flush method by implementation code */
95 mfc0 t0, CP0_EBASE, 0
96 andi t0, t0, PRID_IMP_MASK
97 slt t1, t0, 0x1200 /* impl < 0x1200 => XLP8xx/XLP3xx */
98 beqz t1, 15f /* XLPII (impl >= 0x1200): use cache ops */
99 nop
100
101 /* XLP8xx low level cache flush */
102 li t0, LSU_DEBUG_DATA0
103 li t1, LSU_DEBUG_ADDR
104 li t2, 0 /* index */
105 li t3, 0x1000 /* loop count */
 /* For each of the 0x1000 indexes, write back both ways */
106 11:
107 sll v0, t2, 5 /* v0 <- index << 5 (32-byte lines) */
108 mtcr zero, t0
109 ori v1, v0, 0x3 /* way0 | write_enable | write_active */
110 mtcr v1, t1
111 12:
112 mfcr v1, t1
113 andi v1, 0x1 /* wait for write_active == 0 */
114 bnez v1, 12b
115 nop
116 mtcr zero, t0
117 ori v1, v0, 0x7 /* way1 | write_enable | write_active */
118 mtcr v1, t1
119 13:
120 mfcr v1, t1
121 andi v1, 0x1 /* wait for write_active == 0 */
122 bnez v1, 13b
123 nop
124 addi t2, 1
125 bne t3, t2, 11b
126 nop
127 b 17f /* done, skip the XLPII path */
128 nop
129
130 /* XLPII CPUs, Invalidate all 64k of L1 D-cache */
131 15:
 /* KSEG0 index sweep: 0x80000000..0x80010000 in 32-byte steps */
132 li t0, 0x80000000
133 li t1, 0x80010000
134 16: cache Index_Writeback_Inv_D, 0(t0)
135 addiu t0, t0, 32
136 bne t0, t1, 16b
137 nop
138 17:
139 .endm
140
141 /*
142 * nlm_reset_entry will be copied to the reset entry point for
143 * XLR and XLP. The XLP cores start here when they are woken up. This
144 * is also the NMI entry point.
145 *
146 * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
147 *
148 * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
149 * location, this will have the thread mask (used when core is woken up)
150 * and the current NMI handler in case we reached here for an NMI.
151 *
152 * When a core or thread is newly woken up, it marks itself ready and
153 * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
154 * IPI to it, with the NMI handler set to prom_boot_secondary_cpus
155 */
156 .set noreorder
157 .set noat
158 .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
159
160 FEXPORT(nlm_reset_entry)
 /* Save k0/k1 in CP0 $22 scratch sel 6/7 so NMI handler can use them */
161 dmtc0 k0, $22, 6
162 dmtc0 k1, $22, 7
163 mfc0 k0, CP0_STATUS
164 li k1, 0x80000 /* Status bit 19 — set when entered via NMI */
165 and k1, k0, k1
166 beqz k1, 1f /* go to real reset entry */
167 nop
 /* NMI path: jump to the handler installed in the reset data area */
168 li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
169 ld k0, BOOT_NMI_HANDLER(k1)
170 jr k0
171 nop
172
173 1: /* Entry point on core wakeup */
174 mfc0 t0, CP0_EBASE, 0 /* processor ID */
175 andi t0, PRID_IMP_MASK
176 li t1, 0x1500 /* XLP 9xx */
177 beq t0, t1, 2f /* does not need to set coherent */
178 nop
179
180 li t1, 0x1300 /* XLP 5xx */
181 beq t0, t1, 2f /* does not need to set coherent */
182 nop
183
 /*
  * Make this core coherent: clear its bit in the per-node
  * SYS_CPU_NONCOHERENT_MODE register (see SYS_CPU_COHERENT_BASE).
  */
184 /* set bit in SYS coherent register for the core */
185 mfc0 t0, CP0_EBASE, 1
186 mfc0 t1, CP0_EBASE, 1
187 srl t1, 5
188 andi t1, 0x3 /* t1 <- node */
189 li t2, 0x40000 /* per-node SYS register stride */
190 mul t3, t2, t1 /* t3 = node * 0x40000 */
191 srl t0, t0, 2
192 and t0, t0, 0x7 /* t0 <- core */
193 li t1, 0x1
194 sll t0, t1, t0
195 nor t0, t0, zero /* t0 <- ~(1 << core) */
196 li t2, SYS_CPU_COHERENT_BASE
197 add t2, t2, t3 /* t2 <- SYS offset for node */
198 lw t1, 0(t2)
199 and t1, t1, t0 /* clear this core's noncoherent bit */
200 sw t1, 0(t2)
201
202 /* read back to ensure complete */
203 lw t1, 0(t2)
204 sync
205
206 2:
207 /* Configure LSU on Non-0 Cores. */
208 xlp_config_lsu
209 /* FALL THROUGH */
210
211 /*
212 * Wake up sibling threads from the initial thread in a core.
213 */
214 EXPORT(nlm_boot_siblings)
215 /* core L1D flush before enabling threads */
216 xlp_flush_l1_dcache
217 /* save ra and sp, will be used later (only for boot cpu) */
218 dmtc0 ra, $22, 6
219 dmtc0 sp, $22, 7
220 /* Enable hw threads by writing to MAP_THREADMODE of the core */
221 li t0, CKSEG1ADDR(RESET_DATA_PHYS)
222 lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
223 li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
224 mfcr t2, t0
225 or t2, t2, t1
226 mtcr t2, t0
227
228 /*
229 * The new hardware threads start at the next instruction.
230 * For all the cases other than core 0 thread 0, we will
231 * jump to the secondary wait function.
232 *
233 * NOTE: All GPR contents are lost after the mtcr above!
234 */
235 mfc0 v0, CP0_EBASE, 1
236 andi v0, 0x3ff /* v0 <- node/core */
237
238 beqz v0, 4f /* boot cpu (cpuid == 0)? */
239 nop
240
 /* Secondary CPU: minimal Status (KX for 64-bit kernels, all else 0) */
241 /* setup status reg */
242 move t1, zero
243 #ifdef CONFIG_64BIT
244 ori t1, ST0_KX
245 #endif
246 mtc0 t1, CP0_STATUS
247
248 xlp_early_mmu_init
249
 /* mark CPU ready: set BOOT_CPU_READY[cpuid] = 1 in the reset data */
250 /* mark CPU ready */
251 li t3, CKSEG1ADDR(RESET_DATA_PHYS)
252 ADDIU t1, t3, BOOT_CPU_READY
253 sll v1, v0, 2 /* word index: cpuid * 4 */
254 PTR_ADDU t1, v1
255 li t2, 1
256 sw t2, 0(t1)
257 /* Wait until NMI hits */
258 3: wait
259 b 3b
260 nop
261
262 /*
263 * For the boot CPU, we have to restore ra and sp and return, rest
264 * of the registers will be restored by the caller
265 */
266 4:
267 dmfc0 ra, $22, 6
268 dmfc0 sp, $22, 7
269 jr ra
270 nop
271 EXPORT(nlm_reset_entry_end)
272
 /*
  * Early per-boot-CPU init: apply the XLP LSU/ICU/SCHED configuration
  * and enable ELPA, then return. No-op when not built for CPU_XLP.
  * Clobbers t0-t2 (via the macros).
  */
273 LEAF(nlm_init_boot_cpu)
274 #ifdef CONFIG_CPU_XLP
275 xlp_config_lsu
276 xlp_early_mmu_init
277 #endif
278 jr ra
279 nop
280 END(nlm_init_boot_cpu)