/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include <mach/debug-macro.S>

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C24XX)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location
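
		/*
		 * A note on the fix-up above (illustrative, not from the
		 * original source): LC0 stores its own link-time address
		 * as its first word, while "adr r0, LC0" yields its
		 * run-time address, so in C terms
		 *
		 *	delta = (uintptr_t)&LC0 - LC0[0];
		 *
		 * Adding this delta to any link-time value (such as _edata
		 * or input_data_end - 4) converts it into the address that
		 * value actually occupies right now.
		 */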

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
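
		/*
		 * Sketch of the four byte loads above (illustrative C, the
		 * pointer name is made up): they assemble the appended
		 * little-endian u32 one byte at a time,
		 *
		 *	unsigned char *p = inflated_size_location;
		 *	unsigned int size = p[0] | (p[1] << 8) |
		 *			    (p[2] << 16) | (p[3] << 24);
		 *
		 * which works regardless of the CPU's endianness and of
		 * whether the appended value happens to be word aligned.
		 */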

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif
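
		/*
		 * For reference (my reading of the C side, not part of the
		 * original comments): atags_to_fdt() is the C helper in
		 * atags_to_fdt.c, called here per the AAPCS with r0-r2 as
		 * arguments and r0 as the return value, roughly
		 *
		 *	int atags_to_fdt(void *atag_list, void *fdt,
		 *			 int total_space);
		 *
		 * A return value of 1 means no valid ATAG list was found
		 * at r8, which is why the code retries with the
		 * conventional "start of RAM + 0x100" location.
		 */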

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
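
		/*
		 * The four instructions above are the classic pre-ARMv6
		 * byte swap (illustrative C equivalent, with ror32()
		 * meaning a 32-bit rotate right):
		 *
		 *	t  = x ^ ror32(x, 16);
		 *	t &= ~0x00ff0000;
		 *	x  = ror32(x, 8);
		 *	x ^= t >> 8;
		 *
		 * e.g. 0xAABBCCDD becomes 0xDDCCBBAA, turning the DTB's
		 * big-endian size field into a native little-endian value
		 * without relying on the ARMv6-only "rev" instruction.
		 */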

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
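
/*
 * Worked example of the test above (illustrative numbers only): say
 * r4 = 0x40008000, r9 = 0x00600000 and r10 = 0x403f0000 on entry.
 * After the 16k page-directory reservation r10 = 0x403f4000, so the
 * first test (r4 >= r10) fails and the decompressed kernel could
 * overlap us.  The second test then asks whether r4 + r9 = 0x40608000
 * is still at or below the address of wont_overwrite; only when that
 * also fails do we fall through and relocate ourselves out of the way.
 */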

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b
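
		/*
		 * The loop above is, in rough C (names are illustrative):
		 *
		 *	for (p = got_start; p < got_end; p++) {
		 *		*p += delta;
		 *		if (*p >= bss_start && *p < bss_end)
		 *			*p += dtb_size;
		 *	}
		 *
		 * Every GOT entry moves by the load-address delta, and
		 * entries pointing into the BSS are pushed up further so
		 * they land beyond the appended DTB.
		 */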

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
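
/*
 * For reference (my summary, not from the original source): the 1:1
 * entries written by __setup_mmu use the ARM short-descriptor
 * first-level "section" format, roughly
 *
 *	[31:20]	section base address (one entry per megabyte)
 *	[11:10]	AP, set to 0b11 above (full access)
 *	[4]	XN on v6+ cores (the legacy "U" bit on older ones)
 *	[3:2]	C and B, taken from CB_BITS for RAM, cleared elsewhere
 *	[1:0]	0b10, marking the entry as a section
 *
 * so each word stored above is simply "base | attribute bits".
 */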

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
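
/*
 * Example of the matching rule (illustrative, not from the original
 * comment): a Cortex-A8 reporting MIDR 0x410fc083 is tried against
 * each entry in turn; it reaches the "new CPU Id" entry, where
 * ((0x410fc083 ^ 0x000f0000) & 0x000f0000) == 0 because bits [19:16]
 * of the MIDR are 0xf, so the ARMv7 on/off/flush methods are used.
 * More specific entries must therefore appear earlier in the table.
 */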
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
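
/*
 * Rough pseudocode outline of the set/way walk above (illustrative
 * only, names made up):
 *
 *	for (level = 0; level < LoC; level++) {
 *		if (cache_type(clidr, level) < 2)	// no D/unified cache
 *			continue;
 *		select level in CSSELR, then read CCSIDR;
 *		for (set = num_sets - 1; set >= 0; set--)
 *			for (way = num_ways - 1; way >= 0; way--)
 *				DCCISW(level, set, way);  // clean+invalidate
 *	}
 *
 * followed by an I-cache/branch-predictor invalidate and DSB/ISB.
 */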
1113 | ||
15754bf9 NP |
1114 | __armv5tej_mmu_cache_flush: |
1115 | 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache | |
1116 | bne 1b | |
1117 | mcr p15, 0, r0, c7, c5, 0 @ flush I cache | |
1118 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
1119 | mov pc, lr | |
1120 | ||
c76b6b41 | 1121 | __armv4_mmu_cache_flush: |
1da177e4 LT |
1122 | mov r2, #64*1024 @ default: 32K dcache size (*2) |
1123 | mov r11, #32 @ default: 32 byte line size | |
1124 | mrc p15, 0, r3, c0, c0, 1 @ read cache type | |
98e12b5a | 1125 | teq r3, r9 @ cache ID register present? |
1da177e4 LT |
1126 | beq no_cache_id |
1127 | mov r1, r3, lsr #18 | |
1128 | and r1, r1, #7 | |
1129 | mov r2, #1024 | |
1130 | mov r2, r2, lsl r1 @ base dcache size *2 | |
1131 | tst r3, #1 << 14 @ test M bit | |
1132 | addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1 | |
1133 | mov r3, r3, lsr #12 | |
1134 | and r3, r3, #3 | |
1135 | mov r11, #8 | |
1136 | mov r11, r11, lsl r3 @ cache line size in bytes | |
1137 | no_cache_id: | |
0e056f20 CM |
1138 | mov r1, pc |
1139 | bic r1, r1, #63 @ align to longest cache line | |
1da177e4 | 1140 | add r2, r1, r2 |
0e056f20 CM |
1141 | 1: |
1142 | ARM( ldr r3, [r1], r11 ) @ s/w flush D cache | |
1143 | THUMB( ldr r3, [r1] ) @ s/w flush D cache | |
1144 | THUMB( add r1, r1, r11 ) | |
1da177e4 LT |
1145 | teq r1, r2 |
1146 | bne 1b | |
1147 | ||
1148 | mcr p15, 0, r1, c7, c5, 0 @ flush I cache | |
1149 | mcr p15, 0, r1, c7, c6, 0 @ flush D cache | |
1150 | mcr p15, 0, r1, c7, c10, 4 @ drain WB | |
1151 | mov pc, lr | |
1152 | ||
c76b6b41 | 1153 | __armv3_mmu_cache_flush: |
10c2df65 | 1154 | __armv3_mpu_cache_flush: |
1da177e4 | 1155 | mov r1, #0 |
63fa7187 | 1156 | mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 |
1da177e4 LT |
1157 | mov pc, lr |
1158 | ||
1159 | /* | |
1160 | * Various debugging routines for printing hex characters and | |
1161 | * memory, which again must be relocatable. | |
1162 | */ | |
1163 | #ifdef DEBUG | |
88987ef9 | 1164 | .align 2 |
1da177e4 LT |
1165 | .type phexbuf,#object |
1166 | phexbuf: .space 12 | |
1167 | .size phexbuf, . - phexbuf | |
1168 | ||
be6f9f00 | 1169 | @ phex corrupts {r0, r1, r2, r3} |
1da177e4 LT |
1170 | phex: adr r3, phexbuf |
1171 | mov r2, #0 | |
1172 | strb r2, [r3, r1] | |
1173 | 1: subs r1, r1, #1 | |
1174 | movmi r0, r3 | |
1175 | bmi puts | |
1176 | and r2, r0, #15 | |
1177 | mov r0, r0, lsr #4 | |
1178 | cmp r2, #10 | |
1179 | addge r2, r2, #7 | |
1180 | add r2, r2, #'0' | |
1181 | strb r2, [r3, r1] | |
1182 | b 1b | |
1183 | ||
be6f9f00 | 1184 | @ puts corrupts {r0, r1, r2, r3} |
4e6d488a | 1185 | puts: loadsp r3, r1 |
1da177e4 LT |
1186 | 1: ldrb r2, [r0], #1 |
1187 | teq r2, #0 | |
1188 | moveq pc, lr | |
5cd0c344 | 1189 | 2: writeb r2, r3 |
1da177e4 LT |
1190 | mov r1, #0x00020000 |
1191 | 3: subs r1, r1, #1 | |
1192 | bne 3b | |
1193 | teq r2, #'\n' | |
1194 | moveq r2, #'\r' | |
1195 | beq 2b | |
1196 | teq r0, #0 | |
1197 | bne 1b | |
1198 | mov pc, lr | |
be6f9f00 | 1199 | @ putc corrupts {r0, r1, r2, r3} |
1da177e4 LT |
1200 | putc: |
1201 | mov r2, r0 | |
1202 | mov r0, #0 | |
4e6d488a | 1203 | loadsp r3, r1 |
1da177e4 LT |
1204 | b 2b |
1205 | ||
be6f9f00 | 1206 | @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} |
1da177e4 LT |
1207 | memdump: mov r12, r0 |
1208 | mov r10, lr | |
1209 | mov r11, #0 | |
1210 | 2: mov r0, r11, lsl #2 | |
1211 | add r0, r0, r12 | |
1212 | mov r1, #8 | |
1213 | bl phex | |
1214 | mov r0, #':' | |
1215 | bl putc | |
1216 | 1: mov r0, #' ' | |
1217 | bl putc | |
1218 | ldr r0, [r12, r11, lsl #2] | |
1219 | mov r1, #8 | |
1220 | bl phex | |
1221 | and r0, r11, #7 | |
1222 | teq r0, #3 | |
1223 | moveq r0, #' ' | |
1224 | bleq putc | |
1225 | and r0, r11, #7 | |
1226 | add r11, r11, #1 | |
1227 | teq r0, #7 | |
1228 | bne 1b | |
1229 | mov r0, #'\n' | |
1230 | bl putc | |
1231 | cmp r11, #64 | |
1232 | blt 2b | |
1233 | mov pc, r10 | |
1234 | #endif | |
1235 | ||
92c83ff1 | 1236 | .ltorg |
424e5994 DM |
1237 | |
1238 | #ifdef CONFIG_ARM_VIRT_EXT | |
1239 | .align 5 | |
1240 | __hyp_reentry_vectors: | |
1241 | W(b) . @ reset | |
1242 | W(b) . @ undef | |
1243 | W(b) . @ svc | |
1244 | W(b) . @ pabort | |
1245 | W(b) . @ dabort | |
1246 | W(b) __enter_kernel @ hyp | |
1247 | W(b) . @ irq | |
1248 | W(b) . @ fiq | |
1249 | #endif /* CONFIG_ARM_VIRT_EXT */ | |
1250 | ||
1251 | __enter_kernel: | |
1252 | mov r0, #0 @ must be 0 | |
1253 | ARM( mov pc, r4 ) @ call kernel | |
1254 | THUMB( bx r4 ) @ entry point is always ARM | |
1255 | ||
adcc2591 | 1256 | reloc_code_end: |
1da177e4 LT |
1257 | |
1258 | .align | |
b0c4d4ee | 1259 | .section ".stack", "aw", %nobits |
8d7e4cc2 NP |
1260 | .L_user_stack: .space 4096 |
1261 | .L_user_stack_end: |