]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/mips/netlogic/common/smpboot.S
02651748858423561ef8c4477123d44f668cc5e5
[mirror_ubuntu-artful-kernel.git] / arch / mips / netlogic / common / smpboot.S
1 /*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/init.h>
36
37 #include <asm/asm.h>
38 #include <asm/asm-offsets.h>
39 #include <asm/regdef.h>
40 #include <asm/mipsregs.h>
41 #include <asm/stackframe.h>
42 #include <asm/asmmacro.h>
43 #include <asm/addrspace.h>
44
45 #include <asm/netlogic/common.h>
46
47 #include <asm/netlogic/xlp-hal/iomap.h>
48 #include <asm/netlogic/xlp-hal/xlp.h>
49 #include <asm/netlogic/xlp-hal/sys.h>
50 #include <asm/netlogic/xlp-hal/cpucontrol.h>
51
52 #define CP0_EBASE $15
53 #define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
54 XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
55 SYS_CPU_NONCOHERENT_MODE * 4
56
57 #define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
58
59 /* Enable XLP features and workarounds in the LSU */
60 .macro xlp_config_lsu
/* Program XLP LSU/ICU (and, on Ax, scheduler) defeature registers.
 * Clobbers: t0, t1, t2.  mfcr/mtcr are Netlogic internal control-register
 * accessors — hence the .set arch=xlr above. */
61 li t0, LSU_DEFEATURE
62 mfcr t1, t0
63
64 lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
65 or t1, t1, t2
66 #ifdef XLP_AX_WORKAROUND
67 li t2, ~0xe /* S1RCM */
68 and t1, t1, t2
69 #endif
70 mtcr t1, t0
71
72 li t0, ICU_DEFEATURE
73 mfcr t1, t0
74 ori t1, 0x1000 /* Enable Icache partitioning */
75 mtcr t1, t0
76
77
78 #ifdef XLP_AX_WORKAROUND
79 li t0, SCHED_DEFEATURE
80 lui t1, 0x0100 /* Disable BRU accepting ALU ops */
81 mtcr t1, t0
82 #endif
83 .endm
84
85 /*
86 * This is the code that will be copied to the reset entry point for
87 * XLR and XLP. The XLP cores start here when they are woken up. This
88 * is also the NMI entry point.
89 */
90 .macro xlp_flush_l1_dcache
/* Flush the L1 D-cache by walking all 0x1000 indexes and writing out
 * both ways through the LSU debug registers, polling write_active (bit 0)
 * after each way.  Clobbers: t0-t3, v0, v1. */
91 li t0, LSU_DEBUG_DATA0
92 li t1, LSU_DEBUG_ADDR
93 li t2, 0 /* index */
94 li t3, 0x1000 /* loop count */
95 1:
96 sll v0, t2, 5 /* v0 = index << 5 (debug-addr index field) */
97 mtcr zero, t0
98 ori v1, v0, 0x3 /* way0 | write_enable | write_active */
99 mtcr v1, t1
100 2:
101 mfcr v1, t1
102 andi v1, 0x1 /* wait for write_active == 0 */
103 bnez v1, 2b
104 nop
105 mtcr zero, t0
106 ori v1, v0, 0x7 /* way1 | write_enable | write_active */
107 mtcr v1, t1
108 3:
109 mfcr v1, t1
110 andi v1, 0x1 /* wait for write_active == 0 */
111 bnez v1, 3b
112 nop
113 addi t2, 1
114 bne t3, t2, 1b
115 nop
116 .endm
117
118 /*
119 * The cores can come start when they are woken up. This is also the NMI
120 * entry, so check that first.
121 *
122 * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
123 * location, this will have the thread mask (used when core is woken up)
124 * and the current NMI handler in case we reached here for an NMI.
125 *
126 * When a core or thread is newly woken up, it loops in a 'wait'. When
127 * the CPU really needs waking up, we send an NMI to it, with the NMI
128 * handler set to prom_boot_secondary_cpus
129 */
130
131 .set noreorder
132 .set noat
133 .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
134
135 FEXPORT(nlm_reset_entry)
/* Stash k0/k1 in CP0 $22 scratch selects 6/7 before they are clobbered.
 * NOTE(review): 0x80000 looks like the Status.NMI bit (bit 19) — if it is
 * set we arrived here via NMI, not reset; confirm against the core's
 * Status register layout. */
136 dmtc0 k0, $22, 6
137 dmtc0 k1, $22, 7
138 mfc0 k0, CP0_STATUS
139 li k1, 0x80000
140 and k1, k0, k1
141 beqz k1, 1f /* go to real reset entry */
142 nop
143 li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
144 ld k0, BOOT_NMI_HANDLER(k1)
145 jr k0 /* tail off into the registered NMI handler */
146 nop
147
148 1: /* Entry point on core wakeup */
149 mfc0 t0, CP0_EBASE, 1
150 mfc0 t1, CP0_EBASE, 1
151 srl t1, 5
152 andi t1, 0x3 /* t1 <- node */
153 li t2, 0x40000
154 mul t3, t2, t1 /* t3 = node * 0x40000 */
155 srl t0, t0, 2
156 and t0, t0, 0x7 /* t0 <- core */
157 li t1, 0x1
158 sll t0, t1, t0
159 nor t0, t0, zero /* t0 <- ~(1 << core) */
160 li t2, SYS_CPU_COHERENT_BASE(0)
161 add t2, t2, t3 /* t2 <- SYS offset for node */
162 lw t1, 0(t2)
163 and t1, t1, t0 /* clear this core's bit in SYS_CPU_NONCOHERENT_MODE */
164 sw t1, 0(t2)
165
166 /* read back to ensure complete */
167 lw t1, 0(t2)
168 sync
169
170 /* Configure LSU on Non-0 Cores. */
171 xlp_config_lsu
172 /* FALL THROUGH */
173
174 /*
175 * Wake up sibling threads from the initial thread in
176 * a core.
177 */
178 EXPORT(nlm_boot_siblings)
179 /* core L1D flush before enable threads */
180 xlp_flush_l1_dcache
181 /* Enable hw threads by writing to MAP_THREADMODE of the core */
182 li t0, CKSEG1ADDR(RESET_DATA_PHYS)
183 lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
184 li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
185 mfcr t2, t0
186 or t2, t2, t1
187 mtcr t2, t0
188
189 /*
190 * The new hardware thread starts at the next instruction
191 * For all the cases other than core 0 thread 0, we will
192 * jump to the secondary wait function.
193 */
194 mfc0 v0, CP0_EBASE, 1
195 andi v0, 0x3ff /* v0 <- node/core */
196
197 /* Init MMU in the first thread after changing THREAD_MODE
198 * register (Ax Errata?)
199 */
200 andi v1, v0, 0x3 /* v1 <- thread id */
201 bnez v1, 2f
202 nop
203
204 li t0, MMU_SETUP
205 li t1, 0
206 mtcr t1, t0
207 _ehb /* clear hazard before dependent CP0/control-reg use */
208
209 2: beqz v0, 4f /* boot cpu (cpuid == 0)? */
210 nop
211
212 /* setup status reg */
213 move t1, zero
214 #ifdef CONFIG_64BIT
215 ori t1, ST0_KX /* allow 64-bit kernel segment accesses */
216 #endif
217 mtc0 t1, CP0_STATUS
218 /* mark CPU ready */
219 PTR_LA t1, nlm_cpu_ready
220 sll v1, v0, 2 /* offset = cpuid * 4 (int-sized flags) */
221 PTR_ADDU t1, v1
222 li t2, 1
223 sw t2, 0(t1)
224 /* Wait until NMI hits */
225 3: wait
226 j 3b
227 nop
228
229 /*
230 * For the boot CPU, we have to restore registers and
231 * return
232 */
233 4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
234 li t1, 0xfadebeef
235 dmtc0 t1, $4, 2 /* poison UserLocal with a sentinel after use */
236 PTR_SUBU sp, t0, PT_SIZE
237 RESTORE_ALL
238 jr ra
239 nop
240 EXPORT(nlm_reset_entry_end)
241
242 FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
243 xlp_config_lsu
244 dmtc0 sp, $4, 2 /* SP saved in UserLocal */
245 SAVE_ALL
246 sync
247 /* find the location to which nlm_boot_siblings was relocated */
248 li t0, CKSEG1ADDR(RESET_VEC_PHYS)
249 dla t1, nlm_reset_entry
250 dla t2, nlm_boot_siblings
251 dsubu t2, t1 /* t2 = offset of nlm_boot_siblings within the reset blob */
252 daddu t2, t0 /* t2 = that offset relocated to the reset vector */
253 /* call it */
254 jr t2
255 nop
256 /* not reached */
257
258 __CPUINIT
259 NESTED(nlm_boot_secondary_cpus, 16, sp)
260 /* Initialize CP0 Status */
261 move t1, zero
262 #ifdef CONFIG_64BIT
263 ori t1, ST0_KX /* allow 64-bit kernel segment accesses */
264 #endif
265 mtc0 t1, CP0_STATUS
266 PTR_LA t1, nlm_next_sp
267 PTR_L sp, 0(t1) /* sp <- nlm_next_sp (presumably staged by the waking CPU — confirm) */
268 PTR_LA t1, nlm_next_gp
269 PTR_L gp, 0(t1)
270
271 /* a0 has the processor id */
272 mfc0 a0, CP0_EBASE, 1
273 andi a0, 0x3ff /* a0 <- node/core */
274 PTR_LA t0, nlm_early_init_secondary
275 jalr t0
276 nop
277
278 PTR_LA t0, smp_bootstrap /* tail off into generic MIPS SMP startup */
279 jr t0
280 nop
281 END(nlm_boot_secondary_cpus)
282 __FINIT
283
284 /*
285  * In case of the RMIboot bootloader, which is used on XLR boards, the CPUs
286  * may already be woken up and waiting in bootloader code.
287 * This will get them out of the bootloader code and into linux. Needed
288 * because the bootloader area will be taken and initialized by linux.
289 */
290 __CPUINIT
291 NESTED(nlm_rmiboot_preboot, 16, sp)
292 mfc0 t0, $15, 1 /* read ebase */
293 andi t0, 0x1f /* t0 has the processor_id() */
294 andi t2, t0, 0x3 /* thread num */
295 sll t0, 2 /* offset in cpu array */
296
297 PTR_LA t1, nlm_cpu_ready /* mark CPU ready */
298 PTR_ADDU t1, t0
299 li t3, 1
300 sw t3, 0(t1)
301
302 bnez t2, 1f /* skip thread programming */
303 nop /* for thread id != 0 */
304
305 /*
306 * XLR MMU setup only for first thread in core
307 */
308 li t0, 0x400 /* NOTE(review): magic control-reg number (XLR core control?) — name and confirm */
309 mfcr t1, t0
310 li t2, 6 /* XLR thread mode mask */
311 nor t3, t2, zero /* t3 = ~mask, for clearing the field below */
312 and t2, t1, t2 /* t2 - current thread mode */
313 li v0, CKSEG1ADDR(RESET_DATA_PHYS)
314 lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
315 sll v1, 1 /* align with mode field (mask 6 = bits 2:1) */
316 beq v1, t2, 1f /* same as request value */
317 nop /* nothing to do */
318
319 and t2, t1, t3 /* mask out old thread mode */
320 or t1, t2, v1 /* put in new value */
321 mtcr t1, t0 /* update core control */
322
323 1: wait /* idle here until an NMI re-targets this CPU */
324 j 1b
325 nop
326 END(nlm_rmiboot_preboot)
327 __FINIT