/* arch/mips/netlogic/common/reset.S */
/*
 * Copyright 2003-2013 Broadcom Corporation.
 * All Rights Reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the Broadcom
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>

#include <asm/netlogic/common.h>

#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
/* CP0 register 15 (EBASE/PRId select space), read with an explicit select */
#define CP0_EBASE	$15

/*
 * KSEG1 (uncached) address of the per-node SYS block's cpu-noncoherent
 * register; a node offset is added at runtime for nodes != 0.
 */
#define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
			SYS_CPU_NONCOHERENT_MODE * 4
/*
 * Enable XLP features and workarounds in the LSU.
 * Uses the Netlogic-specific mfcr/mtcr instructions to access the
 * internal LSU/ICU/scheduler defeature registers.
 * Clobbers: t0, t1, t2.
 */
.macro	xlp_config_lsu
	li	t0, LSU_DEFEATURE
	mfcr	t1, t0

	lui	t2, 0xc080	/* SUE, Enable Unaligned Access, L2HPE */
	or	t1, t1, t2
	mtcr	t1, t0

	li	t0, ICU_DEFEATURE
	mfcr	t1, t0
	ori	t1, 0x1000	/* Enable Icache partitioning */
	mtcr	t1, t0

	li	t0, SCHED_DEFEATURE
	lui	t1, 0x0100	/* Disable BRU accepting ALU ops */
	mtcr	t1, t0
.endm

/*
 * L1D cache has to be flushed before enabling threads in XLP.
 * On XLP8xx/XLP3xx, we do a low level flush using processor control
 * registers. On XLPII CPUs, usual cache instructions work.
 * Clobbers: t0, t1, t2, t3, v0, v1.
 */
.macro	xlp_flush_l1_dcache
	/* PRId check: processor IDs below 0x1200 are XLP8xx/3xx class */
	mfc0	t0, CP0_EBASE, 0
	andi	t0, t0, 0xff00
	slt	t1, t0, 0x1200
	beqz	t1, 15f		/* XLPII: use cache instructions */
	nop

	/* XLP8xx low level cache flush via LSU debug registers */
	li	t0, LSU_DEBUG_DATA0
	li	t1, LSU_DEBUG_ADDR
	li	t2, 0		/* index */
	li	t3, 0x1000	/* loop count */
11:
	sll	v0, t2, 5
	mtcr	zero, t0
	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
	mtcr	v1, t1
12:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 12b
	nop
	mtcr	zero, t0
	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
	mtcr	v1, t1
13:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 13b
	nop
	addi	t2, 1
	bne	t3, t2, 11b
	nop
	b	17f
	nop

	/* XLPII CPUs, Invalidate all 64k of L1 D-cache */
15:
	li	t0, 0x80000000
	li	t1, 0x80010000
16:	cache	Index_Writeback_Inv_D, 0(t0)
	addiu	t0, t0, 32	/* 32-byte cache line */
	bne	t0, t1, 16b
	nop
17:
.endm

/*
 * nlm_reset_entry will be copied to the reset entry point for
 * XLR and XLP. The XLP cores start here when they are woken up. This
 * is also the NMI entry point.
 *
 * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
 *
 * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
 * location, this will have the thread mask (used when core is woken up)
 * and the current NMI handler in case we reached here for an NMI.
 *
 * When a core or thread is newly woken up, it marks itself ready and
 * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
 * IPI to it, with the NMI handler set to prom_boot_secondary_cpus
 */
	.set	noreorder
	.set	noat
	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */

FEXPORT(nlm_reset_entry)
	dmtc0	k0, $22, 6	/* save k0/k1 in CP0 scratch 6/7 */
	dmtc0	k1, $22, 7
	mfc0	k0, CP0_STATUS
	li	k1, 0x80000	/* Status.NMI bit */
	and	k1, k0, k1
	beqz	k1, 1f		/* go to real reset entry */
	nop
	li	k1, CKSEG1ADDR(RESET_DATA_PHYS)	/* NMI */
	ld	k0, BOOT_NMI_HANDLER(k1)
	jr	k0
	nop

1:	/* Entry point on core wakeup */
	mfc0	t0, CP0_EBASE, 1
	mfc0	t1, CP0_EBASE, 1
	srl	t1, 5
	andi	t1, 0x3			/* t1 <- node */
	li	t2, 0x40000
	mul	t3, t2, t1		/* t3 = node * 0x40000 */
	srl	t0, t0, 2
	and	t0, t0, 0x7		/* t0 <- core */
	li	t1, 0x1
	sll	t0, t1, t0
	nor	t0, t0, zero		/* t0 <- ~(1 << core) */
	li	t2, SYS_CPU_COHERENT_BASE
	add	t2, t2, t3		/* t2 <- SYS offset for node */
	lw	t1, 0(t2)
	and	t1, t1, t0		/* clear noncoherent bit for this core */
	sw	t1, 0(t2)

	/* read back to ensure complete */
	lw	t1, 0(t2)
	sync

	/* Configure LSU on Non-0 Cores. */
	xlp_config_lsu
	/* FALL THROUGH */

/*
 * Wake up sibling threads from the initial thread in a core.
 */
EXPORT(nlm_boot_siblings)
	/* core L1D flush before enable threads */
	xlp_flush_l1_dcache
	/* Enable hw threads by writing to MAP_THREADMODE of the core */
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
	li	t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
	mfcr	t2, t0
	or	t2, t2, t1
	mtcr	t2, t0

	/*
	 * The new hardware thread starts at the next instruction.
	 * For all the cases other than core 0 thread 0, we will
	 * jump to the secondary wait function.
	 *
	 * NOTE: All GPR contents are lost after the mtcr above!
	 */
	mfc0	v0, CP0_EBASE, 1
	andi	v0, 0x3ff		/* v0 <- node/core */

	beqz	v0, 4f		/* boot cpu (cpuid == 0)? */
	nop

	/* setup status reg */
	move	t1, zero
#ifdef CONFIG_64BIT
	ori	t1, ST0_KX
#endif
	mtc0	t1, CP0_STATUS

	/* mark CPU ready, by setting its BOOT_CPU_READY slot */
	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
	ADDIU	t1, t3, BOOT_CPU_READY
	sll	v1, v0, 2
	PTR_ADDU t1, v1
	li	t2, 1
	sw	t2, 0(t1)
	/* Wait until NMI hits */
3:	wait
	b	3b
	nop

	/*
	 * For the boot CPU, we have to restore registers and
	 * return
	 */
4:	dmfc0	t0, $4, 2	/* restore SP from UserLocal */
	li	t1, 0xfadebeef
	dmtc0	t1, $4, 2	/* overwrite UserLocal with sentinel */
	PTR_SUBU sp, t0, PT_SIZE
	RESTORE_ALL
	jr	ra
	nop
EXPORT(nlm_reset_entry_end)

/*
 * Early LSU setup for the boot CPU (cores > 0 get the same setup in
 * nlm_reset_entry). Leaf routine, clobbers t0-t2 via xlp_config_lsu.
 */
LEAF(nlm_init_boot_cpu)
#ifdef CONFIG_CPU_XLP
	xlp_config_lsu
#endif
	jr	ra
	nop
END(nlm_init_boot_cpu)