/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#include <asm/setup.h>
/*
 * Compile-time FPU/DSP presence flags, consumed by the onchip_setup()
 * wrappers below to pick the default for the "nofpu"/"nodsp" options.
 */
#if defined(CONFIG_SH_FPU)
#define cpu_has_fpu	1
#else
#define cpu_has_fpu	0
#endif

#if defined(CONFIG_SH_DSP)
#define cpu_has_dsp	1
#else
#define cpu_has_dsp	0
#endif
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 *
 * For each unit x this expands to an x_disabled flag (defaulting to
 * "disabled" when the unit is not configured in at all) and a "no<x>"
 * __setup() handler that forces the flag on.  The handler returns 1,
 * which per __setup() convention marks the option as consumed.
 */
#define onchip_setup(x)					\
static int x##_disabled = !cpu_has_##x;			\
							\
static int x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);
#ifdef CONFIG_SPECULATIVE_EXECUTION
/* CPU operations mode register; RABD is bit 5 (read-ahead disable — per SH4A manual, TODO confirm) */
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

/*
 * Enable speculative execution by clearing the RABD bit in CPUOPM,
 * then reading the register back and issuing a barrier so the update
 * is guaranteed to have taken effect before we continue.
 */
static void speculative_execution_init(void)
{
	/* Clear RABD */
	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)__raw_readl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif
#ifdef CONFIG_CPU_SH4A
/* Exception mode mask register and the deprecated-behaviour bits we clear */
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

static void expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
	 */
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

	/* Write back and barrier so the new mask is in force before we return */
	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif
/*
 * 2nd-level cache init.
 *
 * Default no-op stub; a CPU family with an L2 provides a strong
 * definition elsewhere that overrides this weak one.
 */
__attribute__ ((weak)) void l2_cache_init(void)
{
}
/*
 * Generic first-level cache init
 */
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
	unsigned long ccr, flags;

	/* Run uncached while the CCR is rewritten below */
	jump_to_uncached();
	ccr = __raw_readl(SH_CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it. There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety. As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		/* Write 0 to every line of the OC address array, way by way */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				__raw_writel(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	__raw_writel(flags, SH_CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif
cd01204b PM |
199 | #define CSHAPE(totalsize, linesize, assoc) \ |
200 | ((totalsize & ~0xff) | (linesize << 4) | assoc) | |
201 | ||
202 | #define CACHE_DESC_SHAPE(desc) \ | |
203 | CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways) | |
204 | ||
205 | static void detect_cache_shape(void) | |
206 | { | |
207 | l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache); | |
208 | ||
209 | if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED) | |
210 | l1i_cache_shape = l1d_cache_shape; | |
211 | else | |
212 | l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache); | |
213 | ||
214 | if (current_cpu_data.flags & CPU_HAS_L2_CACHE) | |
215 | l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache); | |
216 | else | |
217 | l2_cache_shape = -1; /* No S-cache */ | |
218 | } | |
219 | ||
/*
 * Honor the "nofpu" command line option, then leave the FPU disabled
 * and the task's used-math state cleared; FPU state is re-enabled
 * lazily on first use.
 */
static void fpu_init(void)
{
	/* Disable the FPU */
	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
	}

	disable_fpu();
	clear_used_math();
}
#ifdef CONFIG_SH_DSP
/* Clear SR.DSP so the DSP unit is left disabled */
static void release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

/*
 * Probe for a DSP unit by attempting to set SR.DSP and reading it
 * back: the bit sticks only on CPUs that actually have a DSP.  Also
 * honors the "nodsp" command line option.  The DSP is left disabled
 * either way.
 */
static void dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Disable the DSP */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#else
static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
 * cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in cpu_probe().
 */
asmlinkage void cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	cpu_probe();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/*
	 * Derive the cache geometry fields that the probe code does not
	 * fill in itself.  First setup the rest of the I-cache info.
	 */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

	if (raw_smp_processor_id() == 0) {
		/* shm_align_mask: presumably consumed by the arch mmap code — TODO confirm */
#ifdef CONFIG_MMU
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);
#else
		shm_align_mask = PAGE_SIZE - 1;
#endif

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	fpu_init();
	dsp_init();

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

	speculative_execution_init();
	expmask_init();

	/* Do the rest of the boot processor setup */
	if (raw_smp_processor_id() == 0) {
		/* Save off the BIOS VBR, if there is one */
		sh_bios_vbr_init();

		/*
		 * Setup VBR for boot CPU. Secondary CPUs do this through
		 * start_secondary().
		 */
		per_cpu_trap_init();

		/*
		 * Boot processor to setup the FP and extended state
		 * context info.
		 */
		init_thread_xstate();
	}
}