/*
 * Copyright 2003-2013 Broadcom Corporation.
 * All Rights Reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the Broadcom
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>

#include <asm/netlogic/common.h>

#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>

#define CP0_EBASE	$15
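/*
 * SYS_CPU_COHERENT_BASE(node) expands to the uncached (CKSEG1) address
 * of the given node's SYS_CPU_NONCOHERENT_MODE register: the per-node
 * SYS block sits at XLP_IO_SYS_OFFSET(node) in the XLP I/O space, and
 * its 32-bit registers follow the XLP_IO_PCI_HDRSZ header, hence the
 * register index scaled by 4.
 */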
#define SYS_CPU_COHERENT_BASE(node)	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
			XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
			SYS_CPU_NONCOHERENT_MODE * 4

#define XLP_AX_WORKAROUND	/* enable Ax silicon workarounds */

/* Enable XLP features and workarounds in the LSU */
	.macro	xlp_config_lsu
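	/*
	 * LSU_DEFEATURE, ICU_DEFEATURE and SCHED_DEFEATURE are internal
	 * control registers, accessed with the Netlogic-specific
	 * mfcr/mtcr instructions (hence ".set arch=xlr" later in this
	 * file).
	 */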
	li	t0, LSU_DEFEATURE
	mfcr	t1, t0

	lui	t2, 0xc080	/* SUE, Enable Unaligned Access, L2HPE */
	or	t1, t1, t2
#ifdef XLP_AX_WORKAROUND
	li	t2, ~0xe	/* S1RCM */
	and	t1, t1, t2
#endif
	mtcr	t1, t0

	li	t0, ICU_DEFEATURE
	mfcr	t1, t0
	ori	t1, 0x1000	/* Enable Icache partitioning */
	mtcr	t1, t0

#ifdef XLP_AX_WORKAROUND
	li	t0, SCHED_DEFEATURE
	lui	t1, 0x0100	/* Disable BRU accepting ALU ops */
	mtcr	t1, t0
#endif
	.endm

/*
 * Low-level flush for the L1D cache on XLP; the normal cache ops do
 * not perform a complete and correct cache flush.
 */
	.macro	xlp_flush_l1_dcache
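	/*
	 * For each of the 0x1000 cache indexes, write zero data through
	 * the LSU debug address/data registers to way 0 and then way 1,
	 * polling bit 0 (write_active) until each write has completed.
	 */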
	li	t0, LSU_DEBUG_DATA0
	li	t1, LSU_DEBUG_ADDR
	li	t2, 0		/* index */
	li	t3, 0x1000	/* loop count */
1:
	sll	v0, t2, 5
	mtcr	zero, t0
	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
	mtcr	v1, t1
2:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 2b
	nop
	mtcr	zero, t0
	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
	mtcr	v1, t1
3:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 3b
	nop
	addi	t2, 1
	bne	t3, t2, 1b
	nop
	.endm

/*
 * nlm_reset_entry will be copied to the reset entry point for
 * XLR and XLP. The XLP cores start here when they are woken up. This
 * is also the NMI entry point.
 *
 * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
 *
 * The data corresponding to reset/NMI is stored at the RESET_DATA_PHYS
 * location; it holds the thread mask (used when a core is woken up)
 * and the current NMI handler in case we reached here for an NMI.
 *
 * When a core or thread is newly woken up, it marks itself ready and
 * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
 * IPI to it, with the NMI handler set to prom_boot_secondary_cpus.
 */
	.set	noreorder
	.set	noat
	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */

FEXPORT(nlm_reset_entry)
	dmtc0	k0, $22, 6
	dmtc0	k1, $22, 7
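	/* Status.NMI (bit 19) tells an NMI apart from a reset/wakeup */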
	mfc0	k0, CP0_STATUS
	li	k1, 0x80000
	and	k1, k0, k1
	beqz	k1, 1f		/* go to real reset entry */
	nop
	li	k1, CKSEG1ADDR(RESET_DATA_PHYS)	/* NMI */
	ld	k0, BOOT_NMI_HANDLER(k1)
	jr	k0
	nop

1:	/* Entry point on core wakeup */
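	/*
	 * EBASE select 1 holds the CPU number: bits [6:5] are the node
	 * and bits [4:2] the core, as decoded below. Clear this core's
	 * bit in its node's SYS_CPU_NONCOHERENT_MODE register to bring
	 * the core into the coherent domain.
	 */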
	mfc0	t0, CP0_EBASE, 1
	mfc0	t1, CP0_EBASE, 1
	srl	t1, 5
	andi	t1, 0x3		/* t1 <- node */
	li	t2, 0x40000
	mul	t3, t2, t1	/* t3 = node * 0x40000 */
	srl	t0, t0, 2
	and	t0, t0, 0x7	/* t0 <- core */
	li	t1, 0x1
	sll	t0, t1, t0
	nor	t0, t0, zero	/* t0 <- ~(1 << core) */
	li	t2, SYS_CPU_COHERENT_BASE(0)
	add	t2, t2, t3	/* t2 <- SYS offset for node */
	lw	t1, 0(t2)
	and	t1, t1, t0
	sw	t1, 0(t2)

	/* read back to ensure complete */
	lw	t1, 0(t2)
	sync

	/* Configure LSU on non-zero cores */
	xlp_config_lsu
	/* FALL THROUGH */

/*
 * Wake up sibling threads from the initial thread in
 * a core.
 */
EXPORT(nlm_boot_siblings)
	/* flush the core's L1D cache before enabling threads */
	xlp_flush_l1_dcache
	/* Enable hw threads by writing to MAP_THREADMODE of the core */
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
	li	t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
	mfcr	t2, t0
	or	t2, t2, t1
	mtcr	t2, t0

	/*
	 * The new hardware thread starts at the next instruction.
	 * For all cases other than core 0 thread 0, we will
	 * jump to the secondary wait function.
	 */
	mfc0	v0, CP0_EBASE, 1
	andi	v0, 0x3ff	/* v0 <- node/core/thread id */

	/* Init MMU in the first thread after changing THREAD_MODE
	 * register (Ax Errata?)
	 */
	andi	v1, v0, 0x3	/* v1 <- thread id */
	bnez	v1, 2f
	nop

	li	t0, MMU_SETUP
	li	t1, 0
	mtcr	t1, t0
	_ehb

2:	beqz	v0, 4f		/* boot cpu (cpuid == 0)? */
	nop

	/* setup status reg */
	move	t1, zero
#ifdef CONFIG_64BIT
	ori	t1, ST0_KX
#endif
	mtc0	t1, CP0_STATUS

	/* mark CPU ready, careful here, previous mtcr trashed registers */
	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
	ADDIU	t1, t3, BOOT_CPU_READY
	sll	v1, v0, 2
	PTR_ADDU t1, v1
	li	t2, 1
	sw	t2, 0(t1)
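	/* the sw above publishes this CPU as ready in the reset data
	 * area, per the comment at the top of this file */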
	/* Wait until NMI hits */
3:	wait
	b	3b
	nop

	/*
	 * For the boot CPU, we have to restore registers and
	 * return
	 */
4:	dmfc0	t0, $4, 2	/* restore SP from UserLocal */
	li	t1, 0xfadebeef
	dmtc0	t1, $4, 2	/* overwrite UserLocal with a sentinel */
	PTR_SUBU sp, t0, PT_SIZE
	RESTORE_ALL
	jr	ra
	nop
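	/*
	 * End marker: the block from nlm_reset_entry to here is what
	 * gets copied out to the reset entry point (see the comment at
	 * nlm_reset_entry above).
	 */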
EXPORT(nlm_reset_entry_end)

LEAF(nlm_init_boot_cpu)
#ifdef CONFIG_CPU_XLP
	xlp_config_lsu
#endif
	jr	ra
	nop
END(nlm_init_boot_cpu)