/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c0, c5, 0
        .endm
#elif defined(CONFIG_CPU_V7)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
wait:   mrc     p14, 0, pc, c0, c1, 0
        bcs     wait
        mcr     p14, 0, \ch, c0, c5, 0
        .endm
#elif defined(CONFIG_CPU_XSCALE)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c8, c0, 0
        .endm
#else
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c1, c0, 0
        .endm
#endif

#else

#include <mach/debug-macro.S>

        .macro  writeb, ch, rb
        senduart \ch, \rb
        .endm

#if defined(CONFIG_ARCH_SA1100)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x80000000        @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
        add     \rb, \rb, #0x00050000   @ Ser3
#else
        add     \rb, \rb, #0x00010000   @ Ser1
#endif
        .endm
#elif defined(CONFIG_ARCH_S3C2410)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x50000000
        add     \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
        .endm
#else
        .macro  loadsp, rb, tmp
        addruart \rb, \tmp
        .endm
#endif
#endif
#endif

        .macro  kputc,val
        mov     r0, \val
        bl      putc
        .endm

        .macro  kphex,val,len
        mov     r0, \val
        mov     r1, #\len
        bl      phex
        .endm

        .macro  debug_reloc_start
#ifdef DEBUG
        kputc   #'\n'
        kphex   r6, 8           /* processor id */
        kputc   #':'
        kphex   r7, 8           /* architecture id */
#ifdef CONFIG_CPU_CP15
        kputc   #':'
        mrc     p15, 0, r0, c1, c0
        kphex   r0, 8           /* control reg */
#endif
        kputc   #'\n'
        kphex   r5, 8           /* decompressed kernel start */
        kputc   #'-'
        kphex   r9, 8           /* decompressed kernel end */
        kputc   #'>'
        kphex   r4, 8           /* kernel execution address */
        kputc   #'\n'
#endif
        .endm

        .macro  debug_reloc_end
#ifdef DEBUG
        kphex   r5, 8           /* end of kernel */
        kputc   #'\n'
        mov     r0, r4
        bl      memdump         /* dump 256 bytes at start of kernel */
#endif
        .endm

        .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
        .align
        .arm                            @ Always enter in ARM state
start:
        .type   start,#function
        .rept   7
        mov     r0, r0
        .endr
   ARM( mov     r0, r0          )
   ARM( b       1f              )
 THUMB( adr     r12, BSYM(1f)   )
 THUMB( bx      r12             )

        .word   0x016f2818              @ Magic numbers to help the loader
        .word   start                   @ absolute load/run zImage address
        .word   _edata                  @ zImage end address
 THUMB( .thumb                  )
1:      mov     r7, r1                  @ save architecture ID
        mov     r8, r2                  @ save atags pointer

#ifndef __ARM_ARCH_2__
        /*
         * Booting from Angel - need to enter SVC mode and disable
         * FIQs/IRQs (numeric definitions from angel arm.h source).
         * We only do this if we were in user mode on entry.
         */
        mrs     r2, cpsr                @ get current mode
        tst     r2, #3                  @ not user?
        bne     not_angel
        mov     r0, #0x17               @ angel_SWIreason_EnterSVC
 ARM(   swi     0x123456        )       @ angel_SWI_ARM
 THUMB( svc     0xab            )       @ angel_SWI_THUMB
not_angel:
        mrs     r2, cpsr                @ turn off interrupts to
        orr     r2, r2, #0xc0           @ prevent angel from running
        msr     cpsr_c, r2
#else
        teqp    pc, #0x0c000003         @ turn off interrupts
#endif

        /*
         * Note that some cache flushing and other stuff may
         * be needed here - is there an Angel SWI call for this?
         */

        /*
         * some architecture specific code can be inserted
         * by the linker here, but it should preserve r7, r8, and r9.
         */

        .text

#ifdef CONFIG_AUTO_ZRELADDR
        @ determine final kernel image address
        mov     r4, pc
        and     r4, r4, #0xf8000000
        add     r4, r4, #TEXT_OFFSET
#else
        ldr     r4, =zreladdr
#endif
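        /*
         * Illustrative arithmetic, assuming TEXT_OFFSET = 0x8000 (a
         * common but not universal value): if this code executes at
         * pc = 0x60810000, masking with 0xf8000000 rounds down to the
         * 128MB bank at 0x60000000, so r4 = 0x60000000 + 0x8000 =
         * 0x60008000.  This only works when the zImage runs in the
         * same 128MB region as the intended kernel address.
         */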

        bl      cache_on

restart: adr    r0, LC0
        ldmia   r0, {r1, r2, r3, r5, r6, r9, r11, r12}
        ldr     sp, [r0, #32]

        /*
         * We might be running at a different address.  We need
         * to fix up various pointers.
         */
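        /*
         * r1 holds the link-time address of LC0 (loaded from the table
         * below) and r0 its run-time address, so r0 - r1 is the
         * relocation delta.  For example (addresses illustrative):
         * linked at 0x00000000 but loaded at 0x60010000 gives
         * r0 = 0x60010000, which is then added to each link-time
         * pointer taken from LC0.
         */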
        sub     r0, r0, r1              @ calculate the delta offset
        add     r5, r5, r0              @ _start
        add     r6, r6, r0              @ _edata

#ifndef CONFIG_ZBOOT_ROM
        /* malloc space is above the relocated stack (64k max) */
        add     sp, sp, r0
        add     r10, sp, #0x10000
#else
        /*
         * With ZBOOT_ROM the bss/stack is non-relocatable,
         * but someone could still run this code from RAM,
         * in which case our reference is _edata.
         */
        mov     r10, r6
#endif
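        /*
         * Resulting layout above the image in the non-XIP case
         * (a sketch; exact placement follows the link script):
         *
         *   r10 -> +------------------+
         *          |  malloc arena    | 64k, handed to
         *   sp  -> +------------------+ decompress_kernel later
         *          |  stack           | 4k (.stack section)
         *          +------------------+
         *          |  bss             |
         *          +------------------+
         */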

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r5  = start of this image
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 >= r10 -> OK
 *   r4 + image length <= r5 -> OK
 */
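/*
 * Worked example (all addresses illustrative): with r4 = 0x60008000
 * and r9 = 0x00800000, the decompressed kernel will occupy
 * 0x60008000..0x60808000.  A zImage loaded at r5 = 0x60c00000 with
 * r10 = 0x60c80000 satisfies r4 + r9 (0x60808000) <= r5, so no
 * relocation is needed; the same zImage loaded at 0x60100000 would
 * fail both tests and be copied out of the way below.
 */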
        cmp     r4, r10
        bhs     wont_overwrite
        add     r10, r4, r9
        cmp     r10, r5
        bls     wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r5  = start of this image
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
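/*
 * This is the usual overlapping-memmove argument: the destination is
 * strictly above the source, so a forward copy could overwrite source
 * words before they are read (e.g. copying 0x1000..0x2000 up to a
 * destination at 0x1800 would trash 0x1800..0x2000 first; addresses
 * illustrative).  Copying from the top down with ldmdb/stmdb is
 * always safe for this direction.
 */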
        /* Round up to next 256-byte boundary. */
        add     r10, r10, #256
        bic     r10, r10, #255

        sub     r9, r6, r5              @ size to copy
        add     r9, r9, #31             @ rounded up to a multiple
        bic     r9, r9, #31             @ ... of 32 bytes
        add     r6, r9, r5
        add     r9, r9, r10

1:      ldmdb   r6!, {r0 - r3, r10 - r12, lr}
        cmp     r6, r5
        stmdb   r9!, {r0 - r3, r10 - r12, lr}
        bhi     1b

        /* Preserve offset to relocated code. */
        sub     r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
        /* cache_clean_flush may use the stack, so relocate it */
        add     sp, sp, r6
#endif

        bl      cache_clean_flush

        adr     r0, BSYM(restart)
        add     r0, r0, r6
        mov     pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
        teq     r0, #0
        beq     not_relocated
        add     r11, r11, r0
        add     r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
        /*
         * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
         * we need to fix up pointers into the BSS region.
         * Note that the stack pointer has already been fixed up.
         */
        add     r2, r2, r0
        add     r3, r3, r0

        /*
         * Relocate all entries in the GOT table.
         */
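        /*
         * Each GOT slot holds a link-time absolute address of some
         * C-visible object; adding the delta turns it into the
         * corresponding run-time address.  E.g. a slot containing the
         * linked address of __bss_start becomes __bss_start + delta,
         * matching the fixed-up r2 above (example illustrative).
         */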
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        add     r1, r1, r0              @ table.  This fixes up the
        str     r1, [r11], #4           @ C references.
        cmp     r11, r12
        blo     1b
#else

        /*
         * Relocate entries in the GOT table.  We only relocate
         * the entries that are outside the (relocated) BSS region.
         */
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        cmp     r1, r2                  @ entry < bss_start ||
        cmphs   r3, r1                  @ _end < entry
        addlo   r1, r1, r0              @ table.  This fixes up the
        str     r1, [r11], #4           @ C references.
        cmp     r11, r12
        blo     1b
#endif

not_relocated:  mov     r0, #0
1:      str     r0, [r2], #4            @ clear bss
        str     r0, [r2], #4
        str     r0, [r2], #4
        str     r0, [r2], #4
        cmp     r2, r3
        blo     1b

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
        mov     r0, r4
        mov     r1, sp                  @ malloc space above stack
        add     r2, sp, #0x10000        @ 64k max
        mov     r3, r7
        bl      decompress_kernel
        bl      cache_clean_flush
        bl      cache_off
        mov     r0, #0                  @ must be zero
        mov     r1, r7                  @ restore architecture number
        mov     r2, r8                  @ restore atags pointer
        mov     pc, r4                  @ call kernel

        .align  2
        .type   LC0, #object
LC0:    .word   LC0                     @ r1
        .word   __bss_start             @ r2
        .word   _end                    @ r3
        .word   _start                  @ r5
        .word   _edata                  @ r6
        .word   _image_size             @ r9
        .word   _got_start              @ r11
        .word   _got_end                @ ip
        .word   user_stack_end          @ sp
        .size   LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
        .globl  params
params: ldr     r0, =0x10000100         @ params_phys for RPC
        mov     pc, lr
        .ltorg
        .align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
        .align  5
cache_on: mov   r3, #8                  @ cache_on function
        b       call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting
        mcr     p15, 0, r0, c6, c7, 1

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ D-cache on
        mcr     p15, 0, r0, c2, c0, 1   @ I-cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 1   @ I-access permission
        mcr     p15, 0, r0, c5, c0, 0   @ D-access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ ...I .... ..D. WC.M
        orr     r0, r0, #0x002d         @ .... .... ..1. 11.1
        orr     r0, r0, #0x1000         @ ...1 .... .... ....

        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mov     pc, lr

__armv3_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 0   @ access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        /*
         * ?? ARMv3 MMU does not allow reading the control register,
         * does this really work on ARMv3 MPU?
         */
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ .... .... .... WC.M
        orr     r0, r0, #0x000d         @ .... .... .... 11.1
        /* ?? this overwrites the value constructed above? */
        mov     r0, #0
        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        /* ?? invalidate for the second time? */
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

__setup_mmu: sub r3, r4, #16384         @ Page directory size
        bic     r3, r3, #0xff           @ Align the pointer
        bic     r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
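/*
 * Descriptor arithmetic, as an illustration: each word written below
 * is an ARMv4/v5 first-level section entry, (base << 20) | flags.
 * 0x12 sets the section type bits, 3 << 10 sets AP for full access,
 * and 0x0c sets the C (cacheable) and B (bufferable) bits.  So a
 * cacheable 1:1 mapping of the megabyte at 0x60000000 would be the
 * word 0x60000c1e, while sections outside RAM get 0x...00c12
 * (addresses illustrative).
 */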
        mov     r0, r3
        mov     r9, r0, lsr #18
        mov     r9, r9, lsl #18         @ start of RAM
        add     r10, r9, #0x10000000    @ a reasonable RAM size
        mov     r1, #0x12
        orr     r1, r1, #3 << 10
        add     r2, r3, #16384
1:      cmp     r1, r9                  @ if virt > start of RAM
        orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
        cmp     r1, r10                 @ if virt > end of RAM
        bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
        str     r1, [r0], #4            @ 1:1 mapping
        add     r1, r1, #1048576
        teq     r0, r2
        bne     1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
        mov     r1, #0x1e
        orr     r1, r1, #3 << 10
        mov     r2, pc
        mov     r2, r2, lsr #20
        orr     r1, r1, r2, lsl #20
        add     r0, r3, r2, lsl #2
        str     r1, [r0], #4
        add     r1, r1, #1048576
        str     r1, [r0]
        mov     pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mov     pc, r12

__armv7_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
        tst     r11, #0xf               @ VMSA
        blne    __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        tst     r11, #0xf               @ VMSA
        mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x003c         @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        orrne   r0, r0, #1              @ MMU enabled
        movne   r1, #-1
        mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
#endif
        mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12

__fa526_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ Invalidate whole cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x1000         @ I-cache enable
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mov     pc, r12

__arm6_mmu_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     r0, #0x30
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
        orr     r0, r0, #0x000d         @ Write buffer, mmu
#endif
        mov     r1, #-1
        mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
        b       1f
        .align  5                       @ cache line aligned
1:      mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back to
        sub     pc, lr, r0, lsr #32     @ properly flush pipeline
#endif

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:  adr     r12, proc_types
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r9, c0, c0      @ get processor ID
#else
        ldr     r9, =CONFIG_PROCESSOR_ID
#endif
1:      ldr     r1, [r12, #0]           @ get value
        ldr     r2, [r12, #4]           @ get mask
        eor     r1, r1, r9              @ (real ^ match)
        tst     r1, r2                  @ & mask
 ARM(   addeq   pc, r12, r3     )       @ call cache function
 THUMB( addeq   r12, r3         )
 THUMB( moveq   pc, r12         )       @ call cache function
        add     r12, r12, #4*5
        b       1b

/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
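/*
 * Worked match, assuming an ARM920T-style ID of 0x41129200: against
 * the ARMv4T entry below (match 0x00020000, mask 0x000f0000),
 * (0x41129200 ^ 0x00020000) & 0x000f0000 == 0, so the lookup in
 * call_cache_fn lands on that entry and branches to one of its three
 * method slots at offset r3 (8, 12 or 16).  Each entry is five words,
 * hence the "add r12, r12, #4*5" stride above.
 */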
        .align  2
        .type   proc_types,#object
proc_types:
        .word   0x41560600              @ ARM6/610
        .word   0xffffffe0
        W(b)    __arm6_mmu_cache_off    @ works, but slow
        W(b)    __arm6_mmu_cache_off
        mov     pc, lr
 THUMB( nop                     )
@       b       __arm6_mmu_cache_on     @ untested
@       b       __arm6_mmu_cache_off
@       b       __armv3_mmu_cache_flush

        .word   0x00000000              @ old ARM ID
        .word   0x0000f000
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )

        .word   0x41007000              @ ARM7/710
        .word   0xfff8fe00
        W(b)    __arm7_mmu_cache_off
        W(b)    __arm7_mmu_cache_off
        mov     pc, lr
 THUMB( nop                     )

        .word   0x41807200              @ ARM720T (writethrough)
        .word   0xffffff00
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        mov     pc, lr
 THUMB( nop                     )

        .word   0x41007400              @ ARM74x
        .word   0xff00ff00
        W(b)    __armv3_mpu_cache_on
        W(b)    __armv3_mpu_cache_off
        W(b)    __armv3_mpu_cache_flush

        .word   0x41009400              @ ARM94x
        .word   0xff00ff00
        W(b)    __armv4_mpu_cache_on
        W(b)    __armv4_mpu_cache_off
        W(b)    __armv4_mpu_cache_flush

        .word   0x00007000              @ ARM7 IDs
        .word   0x0000f000
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )

        @ Everything from here on will be the new ID system.

        .word   0x4401a100              @ sa110 / sa1100
        .word   0xffffffe0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x6901b110              @ sa1110
        .word   0xfffffff0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x56056900
        .word   0xffffff00              @ PXA9xx
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x56158000              @ PXA168
        .word   0xfffff000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x56050000              @ Feroceon
        .word   0xff0f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
        /* this conflicts with the standard ARMv5TE entry */
        .long   0x41009260              @ Old Feroceon
        .long   0xff00fff0
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush
#endif

        .word   0x66015261              @ FA526
        .word   0xff01fff1
        W(b)    __fa526_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __fa526_cache_flush

        @ These match on the architecture ID

        .word   0x00020000              @ ARMv4T
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00050000              @ ARMv5TE
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00060000              @ ARMv5TEJ
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x0007b000              @ ARMv6
        .word   0x000ff000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv6_mmu_cache_flush

        .word   0x560f5810              @ Marvell PJ4 ARMv6
        .word   0xff0ffff0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv6_mmu_cache_flush

        .word   0x000f0000              @ new CPU Id
        .word   0x000f0000
        W(b)    __armv7_mmu_cache_on
        W(b)    __armv7_mmu_cache_off
        W(b)    __armv7_mmu_cache_flush

        .word   0                       @ unrecognised type
        .word   0
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )
        mov     pc, lr
 THUMB( nop                     )

        .size   proc_types, . - proc_types

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
        .align  5
cache_off: mov  r3, #12                 @ cache_off function
        b       call_cache_fn

__armv4_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c6, 0   @ flush D-Cache
        mcr     p15, 0, r0, c7, c5, 0   @ flush I-Cache
        mov     pc, lr

__armv3_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0, 0   @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7      @ invalidate whole cache v4
        mcr     p15, 0, r0, c8, c7      @ invalidate whole TLB v4
#endif
        mov     pc, lr

__armv7_mmu_cache_off:
        mrc     p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
        bic     r0, r0, #0x000d
#else
        bic     r0, r0, #0x000c
#endif
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r12, lr
        bl      __armv7_mmu_cache_flush
        mov     r0, #0
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7, 0   @ invalidate whole TLB
#endif
        mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTC
        mcr     p15, 0, r0, c7, c10, 4  @ DSB
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12

__arm6_mmu_cache_off:
        mov     r0, #0x00000030         @ ARM6 control reg.
        b       __armv3_mmu_cache_off

__arm7_mmu_cache_off:
        mov     r0, #0x00000070         @ ARM7 control reg.
        b       __armv3_mmu_cache_off

__armv3_mmu_cache_off:
        mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
        .align  5
cache_clean_flush:
        mov     r3, #16
        b       call_cache_fn

__armv4_mpu_cache_flush:
        mov     r2, #1
        mov     r3, #0
        mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
        mov     r1, #7 << 5             @ 8 segments
1:      orr     r3, r1, #63 << 26       @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2  @ clean & invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                      @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                      @ segments 7 to 0

        teq     r2, #0
        mcrne   p15, 0, ip, c7, c5, 0   @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4  @ drain WB
        mov     pc, lr

__fa526_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv6_mmu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
        mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
        mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv7_mmu_cache_flush:
        mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
        tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
        mov     r10, #0
        beq     hierarchical
        mcr     p15, 0, r10, c7, c14, 0 @ clean+invalidate D
        b       iflush
hierarchical:
        mcr     p15, 0, r10, c7, c10, 5 @ DMB
        stmfd   sp!, {r0-r7, r9-r11}
        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
        ands    r3, r0, #0x7000000      @ extract loc from clidr
        mov     r3, r3, lsr #23         @ left align loc bit field
        beq     finished                @ if loc is 0, then no need to clean
        mov     r10, #0                 @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask of the bits for current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        mcr     p15, 0, r10, c7, c5, 4  @ isb to sync the new cssr&csidr
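        /*
         * csidr (CCSIDR) decode, for reference: bits [2:0] give
         * log2(words per line) - 2 (hence the "+4" below to get
         * log2(bytes per line)), bits [12:3] the number of ways minus
         * one, bits [27:13] the number of sets minus one.
         * Illustratively, a 32K 4-way cache with 64-byte lines reads
         * back as line field 2, way field 3, set field 127.
         */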
        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3      @ find maximum way number (ways - 1)
        clz     r5, r4                  @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13     @ extract max number of the index size
loop2:
        mov     r9, r4                  @ create working copy of max way size
loop3:
 ARM(   orr     r11, r10, r9, lsl r5 )  @ factor way and cache number into r11
 ARM(   orr     r11, r11, r7, lsl r2 )  @ factor index number into r11
 THUMB( lsl     r6, r9, r5      )
 THUMB( orr     r11, r10, r6    )       @ factor way and cache number into r11
 THUMB( lsl     r6, r7, r2      )
 THUMB( orr     r11, r11, r6    )       @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
        subs    r9, r9, #1              @ decrement the way
        bge     loop3
        subs    r7, r7, #1              @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        ldmfd   sp!, {r0-r7, r9-r11}
        mov     r10, #0                 @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
iflush:
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 0  @ invalidate I+BTB
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 4  @ ISB
        mov     pc, lr

__armv5tej_mmu_cache_flush:
1:      mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
        bne     1b
        mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv4_mmu_cache_flush:
        mov     r2, #64*1024            @ default: 32K dcache size (*2)
        mov     r11, #32                @ default: 32 byte line size
        mrc     p15, 0, r3, c0, c0, 1   @ read cache type
        teq     r3, r9                  @ cache ID register present?
        beq     no_cache_id
        mov     r1, r3, lsr #18
        and     r1, r1, #7
        mov     r2, #1024
        mov     r2, r2, lsl r1          @ base dcache size *2
        tst     r3, #1 << 14            @ test M bit
        addne   r2, r2, r2, lsr #1      @ +1/2 size if M == 1
        mov     r3, r3, lsr #12
        and     r3, r3, #3
        mov     r11, #8
        mov     r11, r11, lsl r3        @ cache line size in bytes
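        /*
         * Sketch of the decode above, from the pre-v7 cache type
         * register: a 3-bit size field yields r2 = 1024 << size
         * (twice the D-cache size, since the s/w flush below reads a
         * 2x-sized window), the M bit adds another half, and the
         * 2-bit length field yields a line size of 8 << len bytes.
         * E.g. size field 5 and len field 2 would mean a 16K D-cache
         * flushed by reading 32K in 32-byte steps (values
         * illustrative).
         */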
no_cache_id:
        mov     r1, pc
        bic     r1, r1, #63             @ align to longest cache line
        add     r2, r1, r2
1:
 ARM(   ldr     r3, [r1], r11   )       @ s/w flush D cache
 THUMB( ldr     r3, [r1]        )       @ s/w flush D cache
 THUMB( add     r1, r1, r11     )
        teq     r1, r2
        bne     1b

        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c6, 0   @ flush D cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
        .align  2
        .type   phexbuf,#object
phexbuf: .space 12
        .size   phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:   adr     r3, phexbuf
        mov     r2, #0
        strb    r2, [r3, r1]
1:      subs    r1, r1, #1
        movmi   r0, r3
        bmi     puts
        and     r2, r0, #15
        mov     r0, r0, lsr #4
        cmp     r2, #10
        addge   r2, r2, #7
        add     r2, r2, #'0'
        strb    r2, [r3, r1]
        b       1b

@ puts corrupts {r0, r1, r2, r3}
puts:   loadsp  r3, r1
1:      ldrb    r2, [r0], #1
        teq     r2, #0
        moveq   pc, lr
2:      writeb  r2, r3
        mov     r1, #0x00020000
3:      subs    r1, r1, #1
        bne     3b
        teq     r2, #'\n'
        moveq   r2, #'\r'
        beq     2b
        teq     r0, #0
        bne     1b
        mov     pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
        mov     r2, r0
        mov     r0, #0
        loadsp  r3, r1
        b       2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump: mov    r12, r0
        mov     r10, lr
        mov     r11, #0
2:      mov     r0, r11, lsl #2
        add     r0, r0, r12
        mov     r1, #8
        bl      phex
        mov     r0, #':'
        bl      putc
1:      mov     r0, #' '
        bl      putc
        ldr     r0, [r12, r11, lsl #2]
        mov     r1, #8
        bl      phex
        and     r0, r11, #7
        teq     r0, #3
        moveq   r0, #' '
        bleq    putc
        and     r0, r11, #7
        add     r11, r11, #1
        teq     r0, #7
        bne     1b
        mov     r0, #'\n'
        bl      putc
        cmp     r11, #64
        blt     2b
        mov     pc, r10
#endif

        .ltorg

        .align
        .section ".stack", "aw", %nobits
user_stack:     .space  4096
user_stack_end: