/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/v7m.h>

#include "efi-header.S"

	AR_CLASS( .arch armv7-a )
	M_CLASS( .arch armv7-m )

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable. Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
	.macro loadsp, rb, tmp1, tmp2
	.endm
	.macro writeb, ch, rb, tmp
	mcr p14, 0, \ch, c0, c5, 0
	.endm
#elif defined(CONFIG_CPU_XSCALE)
	.macro loadsp, rb, tmp1, tmp2
	.endm
	.macro writeb, ch, rb, tmp
	mcr p14, 0, \ch, c8, c0, 0
	.endm
#else
	.macro loadsp, rb, tmp1, tmp2
	.endm
	.macro writeb, ch, rb, tmp
	mcr p14, 0, \ch, c1, c0, 0
	.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

	.macro writeb, ch, rb, tmp
#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
	waituartcts \tmp, \rb
#endif
	waituarttxrdy \tmp, \rb
	senduart \ch, \rb
	busyuart \tmp, \rb
	.endm

#if defined(CONFIG_ARCH_SA1100)
	.macro loadsp, rb, tmp1, tmp2
	mov \rb, #0x80000000 @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
	add \rb, \rb, #0x00050000 @ Ser3
#else
	add \rb, \rb, #0x00010000 @ Ser1
#endif
	.endm
#else
	.macro loadsp, rb, tmp1, tmp2
	addruart \rb, \tmp1, \tmp2
	.endm
#endif
#endif
#endif

	.macro kputc,val
	mov r0, \val
	bl putc
	.endm

	.macro kphex,val,len
	mov r0, \val
	mov r1, #\len
	bl phex
	.endm

	/*
	 * Debug kernel copy by printing the memory addresses involved
	 */
	.macro dbgkc, begin, end, cbegin, cend
#ifdef DEBUG
	kputc #'C'
	kputc #':'
	kputc #'0'
	kputc #'x'
	kphex \begin, 8 /* Start of compressed kernel */
	kputc #'-'
	kputc #'0'
	kputc #'x'
	kphex \end, 8 /* End of compressed kernel */
	kputc #'-'
	kputc #'>'
	kputc #'0'
	kputc #'x'
	kphex \cbegin, 8 /* Start of kernel copy */
	kputc #'-'
	kputc #'0'
	kputc #'x'
	kphex \cend, 8 /* End of kernel copy */
	kputc #'\n'
#endif
	.endm

	/*
	 * Debug print of the final appended DTB location
	 */
	.macro dbgadtb, begin, size
#ifdef DEBUG
	kputc #'D'
	kputc #'T'
	kputc #'B'
	kputc #':'
	kputc #'0'
	kputc #'x'
	kphex \begin, 8 /* Start of appended DTB */
	kputc #' '
	kputc #'('
	kputc #'0'
	kputc #'x'
	kphex \size, 8 /* Size of appended DTB */
	kputc #')'
	kputc #'\n'
#endif
	.endm

	.macro enable_cp15_barriers, reg
	mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR
	tst \reg, #(1 << 5) @ CP15BEN bit set?
	bne .L_\@
	orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions
	mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR
	ARM( .inst 0xf57ff06f @ v7+ isb )
	THUMB( isb )
.L_\@:
	.endm

	/*
	 * The kernel build system appends the size of the
	 * decompressed kernel at the end of the compressed data
	 * in little-endian form.
	 */
	.macro get_inflated_image_size, res:req, tmp1:req, tmp2:req
	adr \res, .Linflated_image_size_offset
	ldr \tmp1, [\res]
	add \tmp1, \tmp1, \res @ address of inflated image size

	ldrb \res, [\tmp1] @ get_unaligned_le32
	ldrb \tmp2, [\tmp1, #1]
	orr \res, \res, \tmp2, lsl #8
	ldrb \tmp2, [\tmp1, #2]
	ldrb \tmp1, [\tmp1, #3]
	orr \res, \res, \tmp2, lsl #16
	orr \res, \res, \tmp1, lsl #24
	.endm
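	/*
	 * The size word follows the (arbitrary-length) compressed data,
	 * so it is not necessarily 32-bit aligned; the byte-wise loads
	 * above avoid relying on unaligned ldr support.
	 */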

	.macro be32tocpu, val, tmp
#ifndef __ARMEB__
	/* convert to little endian */
	eor \tmp, \val, \val, ror #16
	bic \tmp, \tmp, #0x00ff0000
	mov \val, \val, ror #8
	eor \val, \val, \tmp, lsr #8
#endif
	.endm
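	/*
	 * The eor/bic/ror sequence above is the classic ARMv4-compatible
	 * 32-bit byte swap (no rev instruction before ARMv6): e.g. the DTB
	 * magic, read as 0xedfe0dd0 on a little-endian CPU, comes out as
	 * 0xd00dfeed.
	 */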

	.section ".start", "ax"
/*
 * sort out different calling conventions
 */
	.align
	/*
	 * Always enter in ARM state for CPUs that support the ARM ISA.
	 * As of today (2014) that's exactly the members of the A and R
	 * classes.
	 */
	AR_CLASS( .arm )
start:
	.type start,#function
	/*
	 * These 7 nops along with the 1 nop immediately below for
	 * !THUMB2 form 8 nops that make the compressed kernel bootable
	 * on legacy ARM systems that assumed the kernel to be in a.out
	 * binary format. The boot loaders on these systems would
	 * jump 32 bytes into the image to skip the a.out header.
	 * With these 8 nops filling exactly 32 bytes, things still
	 * work as expected on these legacy systems. Thumb2 mode keeps
	 * 7 of the nops as it turns out that some boot loaders
	 * were patching the initial instructions of the kernel, i.e.
	 * had started to exploit this "patch area".
	 */
	.rept 7
	__nop
	.endr
#ifndef CONFIG_THUMB2_KERNEL
	__nop
#else
	AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
	M_CLASS( nop.w ) @ M: already in Thumb2 mode
	.thumb
#endif
	W(b) 1f

	.word _magic_sig @ Magic numbers to help the loader
	.word _magic_start @ absolute load/run zImage address
	.word _magic_end @ zImage end address
	.word 0x04030201 @ endianness flag
	.word 0x45454545 @ another magic number to indicate
	.word _magic_table @ additional data table
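	/*
	 * The words above sit at fixed offsets near the start of the image
	 * so a boot loader can identify a zImage, locate its load/end
	 * addresses and the additional data table, and detect its
	 * endianness: the 0x04030201 word reads back as 0x01020304 when
	 * the image and the reader disagree on byte order.
	 */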

	__EFI_HEADER
1:
	ARM_BE8( setend be ) @ go BE8 if compiled for BE8
	AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
	bl __hyp_stub_install @ get into SVC mode, reversibly
#endif
	mov r7, r1 @ save architecture ID
	mov r8, r2 @ save atags pointer

#ifndef CONFIG_CPU_V7M
	/*
	 * Booting from Angel - need to enter SVC mode and disable
	 * FIQs/IRQs (numeric definitions from angel arm.h source).
	 * We only do this if we were in user mode on entry.
	 */
	mrs r2, cpsr @ get current mode
	tst r2, #3 @ not user?
	bne not_angel
	mov r0, #0x17 @ angel_SWIreason_EnterSVC
	ARM( swi 0x123456 ) @ angel_SWI_ARM
	THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
	safe_svcmode_maskall r0
	msr spsr_cxsf, r9 @ Save the CPU boot mode in
					@ SPSR
#endif
	/*
	 * Note that some cache flushing and other stuff may
	 * be needed here - is there an Angel SWI call for this?
	 */

	/*
	 * Some architecture-specific code can be inserted
	 * by the linker here, but it should preserve r7, r8, and r9.
	 */

	.text

#ifdef CONFIG_AUTO_ZRELADDR
	/*
	 * Find the start of physical memory. As we are executing
	 * without the MMU on, we are in the physical address space.
	 * We just need to get rid of any offset by aligning the
	 * address.
	 *
	 * This alignment is a balance between the requirements of
	 * different platforms - we have chosen 128MB to allow
	 * platforms which align the start of their physical memory
	 * to 128MB to use this feature, while allowing the zImage
	 * to be placed within the first 128MB of memory on other
	 * platforms. Increasing the alignment means we place
	 * stricter alignment requirements on the start of physical
	 * memory, but relaxing it means that we break people who
	 * are already placing their zImage in (e.g.) the top 64MB
	 * of this range.
	 */
	mov r4, pc
	and r4, r4, #0xf8000000
	/* Determine final kernel image address. */
	add r4, r4, #TEXT_OFFSET
#else
	ldr r4, =zreladdr
#endif

	/*
	 * Set up a page table only if it won't overwrite ourselves.
	 * That means r4 < pc || r4 - 16k page directory > &_end.
	 * Given that r4 > &_end is most infrequent, we add a rough
	 * additional 1MB of room for a possible appended DTB.
	 */
	mov r0, pc
	cmp r0, r4
	ldrcc r0, .Lheadroom
	addcc r0, r0, pc
	cmpcc r4, r0
	orrcc r4, r4, #1 @ remember we skipped cache_on
	blcs cache_on

restart: adr r0, LC1
	ldr sp, [r0]
	ldr r6, [r0, #4]
	add sp, sp, r0
	add r6, r6, r0

	get_inflated_image_size r9, r10, lr

#ifndef CONFIG_ZBOOT_ROM
	/* malloc space is above the relocated stack (64k max) */
	add r10, sp, #MALLOC_SIZE
#else
	/*
	 * With ZBOOT_ROM the bss/stack is non-relocatable,
	 * but someone could still run this code from RAM,
	 * in which case our reference is _edata.
	 */
	mov r10, r6
#endif

	mov r5, #0 @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r4 = final kernel address (possibly with LSB set)
 * r5 = appended dtb size (still unknown)
 * r6 = _edata
 * r7 = architecture ID
 * r8 = atags/device tree pointer
 * r9 = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * sp = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

	ldr lr, [r6, #0]
#ifndef __ARMEB__
	ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
#else
	ldr r1, =0xd00dfeed
#endif
	cmp lr, r1
	bne dtb_check_done @ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
	/*
	 * OK... Let's do some funky business here.
	 * If we do have a DTB appended to zImage, and we do have
	 * an ATAG list around, we want the latter to be translated
	 * and folded into the former here. No GOT fixup has occurred
	 * yet, but none of the code we're about to call uses any
	 * global variable.
	 */

	/* Get the initial DTB size */
	ldr r5, [r6, #4]
	be32tocpu r5, r1
	dbgadtb r6, r5
	/* 50% DTB growth should be good enough */
	add r5, r5, r5, lsr #1
	/* preserve 64-bit alignment */
	add r5, r5, #7
	bic r5, r5, #7
	/* clamp to 32KB min and 1MB max */
	cmp r5, #(1 << 15)
	movlo r5, #(1 << 15)
	cmp r5, #(1 << 20)
	movhi r5, #(1 << 20)
	/* temporarily relocate the stack past the DTB work space */
	add sp, sp, r5

	mov r0, r8
	mov r1, r6
	mov r2, r5
	bl atags_to_fdt

	/*
	 * If the returned value is 1, there is no ATAG at the location
	 * pointed to by r8. Try the typical 0x100 offset from start
	 * of RAM and hope for the best.
	 */
	cmp r0, #1
	sub r0, r4, #TEXT_OFFSET
	bic r0, r0, #1
	add r0, r0, #0x100
	mov r1, r6
	mov r2, r5
	bleq atags_to_fdt

	sub sp, sp, r5
#endif

	mov r8, r6 @ use the appended device tree

	/*
	 * Make sure that the DTB doesn't end up in the final
	 * kernel's .bss area. To do so, we adjust the decompressed
	 * kernel size to compensate if that .bss size is larger
	 * than the relocated code.
	 */
	ldr r5, =_kernel_bss_size
	adr r1, wont_overwrite
	sub r1, r6, r1
	subs r1, r5, r1
	addhi r9, r9, r1

	/* Get the current DTB size */
	ldr r5, [r6, #4]
	be32tocpu r5, r1

	/* preserve 64-bit alignment */
	add r5, r5, #7
	bic r5, r5, #7

	/* relocate some pointers past the appended dtb */
	add r6, r6, r5
	add r10, r10, r5
	add sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 * r4 = final kernel address (possibly with LSB set)
 * r9 = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
	add r10, r10, #16384
	cmp r4, r10
	bhs wont_overwrite
	add r10, r4, r9
	adr r9, wont_overwrite
	cmp r10, r9
	bls wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 * r6 = _edata
 * r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
	/*
	 * Bump to the next 256-byte boundary with the size of
	 * the relocation code added. This avoids overwriting
	 * ourselves when the offset is small.
	 */
	add r10, r10, #((reloc_code_end - restart + 256) & ~255)
	bic r10, r10, #255

	/* Get start of code we want to copy and align it down. */
	adr r5, restart
	bic r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
	mrs r0, spsr
	and r0, r0, #MODE_MASK
	cmp r0, #HYP_MODE
	bne 1f

	/*
	 * Compute the address of the hyp vectors after relocation.
	 * This requires some arithmetic since we cannot directly
	 * reference __hyp_stub_vectors in a PC-relative way.
	 * Call __hyp_set_vectors with the new address so that we
	 * can HVC again after the copy.
	 */
0:	adr r0, 0b
	movw r1, #:lower16:__hyp_stub_vectors - 0b
	movt r1, #:upper16:__hyp_stub_vectors - 0b
	add r0, r0, r1
	sub r0, r0, r5
	add r0, r0, r10
	bl __hyp_set_vectors
1:
#endif

	sub r9, r6, r5 @ size to copy
	add r9, r9, #31 @ rounded up to a multiple
	bic r9, r9, #31 @ ... of 32 bytes
	add r6, r9, r5
	add r9, r9, r10

#ifdef DEBUG
	sub r10, r6, r5
	sub r10, r9, r10
	/*
	 * We are about to copy the kernel to a new memory area.
	 * The boundaries of the new memory area can be found in
	 * r10 and r9, whilst r5 and r6 contain the boundaries
	 * of the memory we are going to copy.
	 * Calling dbgkc will help with the printing of this
	 * information.
	 */
	dbgkc r5, r6, r10, r9
#endif

1:	ldmdb r6!, {r0 - r3, r10 - r12, lr}
	cmp r6, r5
	stmdb r9!, {r0 - r3, r10 - r12, lr}
	bhi 1b

	/* Preserve offset to relocated code. */
	sub r6, r9, r6

	mov r0, r9 @ start of relocated zImage
	add r1, sp, r6 @ end of relocated zImage
	bl cache_clean_flush

	badr r0, restart
	add r0, r0, r6
	mov pc, r0

wont_overwrite:
	adr r0, LC0
	ldmia r0, {r1, r2, r3, r11, r12}
	sub r0, r0, r1 @ calculate the delta offset

/*
 * If delta is zero, we are running at the address we were linked at.
 * r0 = delta
 * r2 = BSS start
 * r3 = BSS end
 * r4 = kernel execution address (possibly with LSB set)
 * r5 = appended dtb size (0 if not present)
 * r7 = architecture ID
 * r8 = atags pointer
 * r11 = GOT start
 * r12 = GOT end
 * sp = stack pointer
 */
	orrs r1, r0, r5
	beq not_relocated

	add r11, r11, r0
	add r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
	/*
	 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
	 * we need to fix up pointers into the BSS region.
	 * Note that the stack pointer has already been fixed up.
	 */
	add r2, r2, r0
	add r3, r3, r0

	/*
	 * Relocate all entries in the GOT table.
	 * Bump bss entries to _edata + dtb size
	 */
1:	ldr r1, [r11, #0] @ relocate entries in the GOT
	add r1, r1, r0 @ This fixes up C references
	cmp r1, r2 @ if entry >= bss_start &&
	cmphs r3, r1 @ bss_end > entry
	addhi r1, r1, r5 @ entry += dtb size
	str r1, [r11], #4 @ next entry
	cmp r11, r12
	blo 1b

	/* bump our bss pointers too */
	add r2, r2, r5
	add r3, r3, r5

#else

	/*
	 * Relocate entries in the GOT table. We only relocate
	 * the entries that are outside the (relocated) BSS region.
	 */
1:	ldr r1, [r11, #0] @ relocate entries in the GOT
	cmp r1, r2 @ entry < bss_start ||
	cmphs r3, r1 @ _end < entry
	addlo r1, r1, r0 @ table. This fixes up the
	str r1, [r11], #4 @ C references.
	cmp r11, r12
	blo 1b
#endif

not_relocated: mov r0, #0
1:	str r0, [r2], #4 @ clear bss
	str r0, [r2], #4
	str r0, [r2], #4
	str r0, [r2], #4
	cmp r2, r3
	blo 1b

	/*
	 * Did we skip the cache setup earlier?
	 * That is indicated by the LSB in r4.
	 * Do it now if so.
	 */
	tst r4, #1
	bic r4, r4, #1
	blne cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 * r4 = kernel execution address
 * r7 = architecture ID
 * r8 = atags pointer
 */
	mov r0, r4
	mov r1, sp @ malloc space above stack
	add r2, sp, #MALLOC_SIZE @ 64k max
	mov r3, r7
	bl decompress_kernel

	get_inflated_image_size r1, r2, r3

	mov r0, r4 @ start of inflated image
	add r1, r1, r0 @ end of inflated image
	bl cache_clean_flush
	bl cache_off

#ifdef CONFIG_ARM_VIRT_EXT
	mrs r0, spsr @ Get saved CPU boot mode
	and r0, r0, #MODE_MASK
	cmp r0, #HYP_MODE @ if not booted in HYP mode...
	bne __enter_kernel @ boot kernel directly

	adr r12, .L__hyp_reentry_vectors_offset
	ldr r0, [r12]
	add r0, r0, r12

	bl __hyp_set_vectors
	__HVC(0) @ otherwise bounce to hyp mode

	b . @ should never be reached

	.align 2
.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
#else
	b __enter_kernel
#endif

	.align 2
	.type LC0, #object
LC0:	.word LC0 @ r1
	.word __bss_start @ r2
	.word _end @ r3
	.word _got_start @ r11
	.word _got_end @ ip
	.size LC0, . - LC0

	.type LC1, #object
LC1:	.word .L_user_stack_end - LC1 @ sp
	.word _edata - LC1 @ r6
	.size LC1, . - LC1

.Lheadroom:
	.word _end - restart + 16384 + 1024*1024
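	/*
	 * Headroom assumed by the early "can we build the page tables
	 * without overwriting ourselves" check: the image from restart to
	 * _end, plus 16k for the page directory and roughly 1MB of slack
	 * for a possible appended DTB (see the comment above the initial
	 * cache_on decision).
	 */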

.Linflated_image_size_offset:
	.long (input_data_end - 4) - .

#ifdef CONFIG_ARCH_RPC
	.globl params
params:	ldr r0, =0x10000100 @ params_phys for RPC
	mov pc, lr
	.ltorg
	.align
#endif

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register
 * on ARMv7.
 */
	.macro dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
	movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	ldr \tmp, [\tmp]
#else
	mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
#endif
	lsr \tmp, \tmp, #16
	and \tmp, \tmp, #0xf @ cache line size encoding
	mov \reg, #4 @ bytes per word
	mov \reg, \reg, lsl \tmp @ actual cache line size
	.endm

/*
 * Turn on the cache. We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it. If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
	.align 5
cache_on: mov r3, #8 @ cache_on function
	b call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover all 32bit addresses, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
	mov r0, #0x3f @ 4G, the whole
	mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
	mcr p15, 0, r0, c6, c7, 1

	mov r0, #0x80 @ PR7
	mcr p15, 0, r0, c2, c0, 0 @ D-cache on
	mcr p15, 0, r0, c2, c0, 1 @ I-cache on
	mcr p15, 0, r0, c3, c0, 0 @ write-buffer on

	mov r0, #0xc000
	mcr p15, 0, r0, c5, c0, 1 @ I-access permission
	mcr p15, 0, r0, c5, c0, 0 @ D-access permission

	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
	mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
	mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
					@ ...I .... ..D. WC.M
	orr r0, r0, #0x002d @ .... .... ..1. 11.1
	orr r0, r0, #0x1000 @ ...1 .... .... ....

	mcr p15, 0, r0, c1, c0, 0 @ write control reg

	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
	mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
	mov pc, lr

__armv3_mpu_cache_on:
	mov r0, #0x3f @ 4G, the whole
	mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting

	mov r0, #0x80 @ PR7
	mcr p15, 0, r0, c2, c0, 0 @ cache on
	mcr p15, 0, r0, c3, c0, 0 @ write-buffer on

	mov r0, #0xc000
	mcr p15, 0, r0, c5, c0, 0 @ access permission

	mov r0, #0
	mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
	/*
	 * ?? ARMv3 MMU does not allow reading the control register,
	 * does this really work on ARMv3 MPU?
	 */
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
					@ .... .... .... WC.M
	orr r0, r0, #0x000d @ .... .... .... 11.1
	/* ?? this overwrites the value constructed above? */
	mov r0, #0
	mcr p15, 0, r0, c1, c0, 0 @ write control reg

	/* ?? invalidate for the second time? */
	mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
	mov pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu: sub r3, r4, #16384 @ Page directory size
	bic r3, r3, #0xff @ Align the pointer
	bic r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
	mov r0, r3
	mov r9, r0, lsr #18
	mov r9, r9, lsl #18 @ start of RAM
	add r10, r9, #0x10000000 @ a reasonable RAM size
	mov r1, #0x12 @ XN|U + section mapping
	orr r1, r1, #3 << 10 @ AP=11
	add r2, r3, #16384
1:	cmp r1, r9 @ if virt > start of RAM
	cmphs r10, r1 @ && end of RAM > virt
	bic r1, r1, #0x1c @ clear XN|U + C + B
	orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
	orrhs r1, r1, r6 @ set RAM section settings
	str r1, [r0], #4 @ 1:1 mapping
	add r1, r1, #1048576
	teq r0, r2
	bne 1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance... We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
	orr r1, r6, #0x04 @ ensure B is set for this
	orr r1, r1, #3 << 10
	mov r2, pc
	mov r2, r2, lsr #20
	orr r1, r1, r2, lsl #20
	add r0, r3, r2, lsl #2
	str r1, [r0], #4
	add r1, r1, #1048576
	str r1, [r0]
	mov pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
	mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
	bic r0, r0, #2 @ A (no unaligned access fault)
	orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
	mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
	b __armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov r0, #4 @ put dcache in WT mode
	mcr p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
	mov r12, lr
#ifdef CONFIG_MMU
	mov r6, #CB_BITS | 0x12 @ U
	bl __setup_mmu
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
	mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
	orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
	orr r0, r0, #0x0030
	ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
	bl __common_mmu_cache_on
	mov r0, #0
	mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
	mov pc, r12

__armv7_mmu_cache_on:
	enable_cp15_barriers r11
	mov r12, lr
#ifdef CONFIG_MMU
	mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
	tst r11, #0xf @ VMSA
	movne r6, #CB_BITS | 0x02 @ !XN
	blne __setup_mmu
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
	tst r11, #0xf @ VMSA
	mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
	bic r0, r0, #1 << 28 @ clear SCTLR.TRE
	orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
	orr r0, r0, #0x003c @ write buffer
	bic r0, r0, #2 @ A (no unaligned access fault)
	orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
					@ (needed for ARM1176)
#ifdef CONFIG_MMU
	ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
	mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
	orrne r0, r0, #1 @ MMU enabled
	movne r1, #0xfffffffd @ domain 0 = client
	bic r6, r6, #1 << 31 @ 32-bit translation system
	bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
	mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
	mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
	mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
	mcr p15, 0, r0, c7, c5, 4 @ ISB
	mcr p15, 0, r0, c1, c0, 0 @ load control register
	mrc p15, 0, r0, c1, c0, 0 @ and read it back
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 4 @ ISB
	mov pc, r12

__fa526_cache_on:
	mov r12, lr
	mov r6, #CB_BITS | 0x12 @ U
	bl __setup_mmu
	mov r0, #0
	mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
	mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
	orr r0, r0, #0x1000 @ I-cache enable
	bl __common_mmu_cache_on
	mov r0, #0
	mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
	mov pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
	orr r0, r0, #0x000d @ Write buffer, mmu
#endif
	mov r1, #-1
	mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
	mcr p15, 0, r1, c3, c0, 0 @ load domain access control
	b 1f
	.align 5 @ cache line aligned
1:	mcr p15, 0, r0, c1, c0, 0 @ load control register
	mrc p15, 0, r0, c1, c0, 0 @ and read it back to
	sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)
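/*
 * Each proc_types entry below is PROC_ENTRY_SIZE = 5 words long: a CPU ID
 * match value, a mask, and one (branch) instruction each for the cache_on,
 * cache_off and cache_flush methods.
 */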

/*
 * Here follow the relocatable cache support functions for the
 * various processors. This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block. Please note this is all position
 * independent code.
 *
 *  r1 = corrupted
 *  r2 = corrupted
 *  r3 = block offset
 *  r9 = corrupted
 *  r12 = corrupted
 */

call_cache_fn: adr r12, proc_types
#ifdef CONFIG_CPU_CP15
	mrc p15, 0, r9, c0, c0 @ get processor ID
#elif defined(CONFIG_CPU_V7M)
	/*
	 * On v7-M the processor id is located in the V7M_SCB_CPUID
	 * register, but as cache handling is IMPLEMENTATION DEFINED on
	 * v7-M (if existent at all) we just return early here.
	 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
	 * __armv7_mmu_cache_{on,off,flush}) would be selected which
	 * use cp15 registers that are not implemented on v7-M.
	 */
	bx lr
#else
	ldr r9, =CONFIG_PROCESSOR_ID
#endif
1:	ldr r1, [r12, #0] @ get value
	ldr r2, [r12, #4] @ get mask
	eor r1, r1, r9 @ (real ^ match)
	tst r1, r2 @ & mask
	ARM( addeq pc, r12, r3 ) @ call cache function
	THUMB( addeq r12, r3 )
	THUMB( moveq pc, r12 ) @ call cache function
	add r12, r12, #PROC_ENTRY_SIZE
	b 1b

/*
 * Table for cache operations. This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods. Writeback caches _must_ have the flush method
 * defined.
 */
	.align 2
	.type proc_types,#object
proc_types:
	.word 0x41000000 @ old ARM ID
	.word 0xff00f000
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )

	.word 0x41007000 @ ARM7/710
	.word 0xfff8fe00
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )

	.word 0x41807200 @ ARM720T (writethrough)
	.word 0xffffff00
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	mov pc, lr
	THUMB( nop )

	.word 0x41007400 @ ARM74x
	.word 0xff00ff00
	W(b) __armv3_mpu_cache_on
	W(b) __armv3_mpu_cache_off
	W(b) __armv3_mpu_cache_flush

	.word 0x41009400 @ ARM94x
	.word 0xff00ff00
	W(b) __armv4_mpu_cache_on
	W(b) __armv4_mpu_cache_off
	W(b) __armv4_mpu_cache_flush

	.word 0x41069260 @ ARM926EJ-S (v5TEJ)
	.word 0xff0ffff0
	W(b) __arm926ejs_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv5tej_mmu_cache_flush

	.word 0x00007000 @ ARM7 IDs
	.word 0x0000f000
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )

	@ Everything from here on will be the new ID system.

	.word 0x4401a100 @ sa110 / sa1100
	.word 0xffffffe0
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv4_mmu_cache_flush

	.word 0x6901b110 @ sa1110
	.word 0xfffffff0
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv4_mmu_cache_flush

	.word 0x56056900
	.word 0xffffff00 @ PXA9xx
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv4_mmu_cache_flush

	.word 0x56158000 @ PXA168
	.word 0xfffff000
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv5tej_mmu_cache_flush

	.word 0x56050000 @ Feroceon
	.word 0xff0f0000
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	/* this conflicts with the standard ARMv5TE entry */
	.long 0x41009260 @ Old Feroceon
	.long 0xff00fff0
	b __armv4_mmu_cache_on
	b __armv4_mmu_cache_off
	b __armv5tej_mmu_cache_flush
#endif

	.word 0x66015261 @ FA526
	.word 0xff01fff1
	W(b) __fa526_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __fa526_cache_flush

	@ These match on the architecture ID

	.word 0x00020000 @ ARMv4T
	.word 0x000f0000
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv4_mmu_cache_flush

	.word 0x00050000 @ ARMv5TE
	.word 0x000f0000
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv4_mmu_cache_flush

	.word 0x00060000 @ ARMv5TEJ
	.word 0x000f0000
	W(b) __armv4_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv5tej_mmu_cache_flush

	.word 0x0007b000 @ ARMv6
	.word 0x000ff000
	W(b) __armv6_mmu_cache_on
	W(b) __armv4_mmu_cache_off
	W(b) __armv6_mmu_cache_flush

	.word 0x000f0000 @ new CPU Id
	.word 0x000f0000
	W(b) __armv7_mmu_cache_on
	W(b) __armv7_mmu_cache_off
	W(b) __armv7_mmu_cache_flush

	.word 0 @ unrecognised type
	.word 0
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )
	mov pc, lr
	THUMB( nop )

	.size proc_types, . - proc_types

	/*
	 * If you get a "non-constant expression in ".if" statement"
	 * error from the assembler on this line, check that you have
	 * not accidentally written a "b" instruction where you should
	 * have written W(b).
	 */
	.if (. - proc_types) % PROC_ENTRY_SIZE != 0
	.error "The size of one or more proc_types entries is wrong."
	.endif

/*
 * Turn off the Cache and MMU. ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
	.align 5
cache_off: mov r3, #12 @ cache_off function
	b call_cache_fn

__armv4_mpu_cache_off:
	mrc p15, 0, r0, c1, c0
	bic r0, r0, #0x000d
	mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
	mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
	mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
	mov pc, lr

__armv3_mpu_cache_off:
	mrc p15, 0, r0, c1, c0
	bic r0, r0, #0x000d
	mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
	mov r0, #0
	mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
	mov pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
	mrc p15, 0, r0, c1, c0
	bic r0, r0, #0x000d
	mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
	mov r0, #0
	mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
	mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
#endif
	mov pc, lr

__armv7_mmu_cache_off:
	mrc p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
	bic r0, r0, #0x000d
#else
	bic r0, r0, #0x000c
#endif
	mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
	mov r0, #0
#ifdef CONFIG_MMU
	mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
#endif
	mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
	mcr p15, 0, r0, c7, c10, 4 @ DSB
	mcr p15, 0, r0, c7, c5, 4 @ ISB
	mov pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r0 = start address
 *  r1 = end address (exclusive)
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
	.align 5
cache_clean_flush:
	mov r3, #16
	mov r11, r1
	b call_cache_fn

__armv4_mpu_cache_flush:
	tst r4, #1
	movne pc, lr
	mov r2, #1
	mov r3, #0
	mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
	mov r1, #7 << 5 @ 8 segments
1:	orr r3, r1, #63 << 26 @ 64 entries
2:	mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
	subs r3, r3, #1 << 26
	bcs 2b @ entries 63 to 0
	subs r1, r1, #1 << 5
	bcs 1b @ segments 7 to 0

	teq r2, #0
	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
	mcr p15, 0, ip, c7, c10, 4 @ drain WB
	mov pc, lr

__fa526_cache_flush:
	tst r4, #1
	movne pc, lr
	mov r1, #0
	mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
	mcr p15, 0, r1, c7, c5, 0 @ flush I cache
	mcr p15, 0, r1, c7, c10, 4 @ drain WB
	mov pc, lr

__armv6_mmu_cache_flush:
	mov r1, #0
	tst r4, #1
	mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
	mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
	mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
	mcr p15, 0, r1, c7, c10, 4 @ drain WB
	mov pc, lr

__armv7_mmu_cache_flush:
	enable_cp15_barriers r10
	tst r4, #1
	bne iflush
	mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
	tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
	mov r10, #0
	beq hierarchical
	mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
	b iflush
hierarchical:
	dcache_line_size r1, r2 @ r1 := dcache min line size
	sub r2, r1, #1 @ r2 := line size mask
	bic r0, r0, r2 @ round down start to line size
	sub r11, r11, #1 @ end address is exclusive
	bic r11, r11, r2 @ round down end to line size
0:	cmp r0, r11 @ finished?
	bgt iflush
	mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA
	add r0, r0, r1
	b 0b
iflush:
	mcr p15, 0, r10, c7, c10, 4 @ DSB
	mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
	mcr p15, 0, r10, c7, c10, 4 @ DSB
	mcr p15, 0, r10, c7, c5, 4 @ ISB
	mov pc, lr

__armv5tej_mmu_cache_flush:
	tst r4, #1
	movne pc, lr
1:	mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
	bne 1b
	mcr p15, 0, r0, c7, c5, 0 @ flush I cache
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	mov pc, lr

__armv4_mmu_cache_flush:
	tst r4, #1
	movne pc, lr
	mov r2, #64*1024 @ default: 32K dcache size (*2)
	mov r11, #32 @ default: 32 byte line size
	mrc p15, 0, r3, c0, c0, 1 @ read cache type
	teq r3, r9 @ cache ID register present?
	beq no_cache_id
	mov r1, r3, lsr #18
	and r1, r1, #7
	mov r2, #1024
	mov r2, r2, lsl r1 @ base dcache size *2
	tst r3, #1 << 14 @ test M bit
	addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
	mov r3, r3, lsr #12
	and r3, r3, #3
	mov r11, #8
	mov r11, r11, lsl r3 @ cache line size in bytes
no_cache_id:
	mov r1, pc
	bic r1, r1, #63 @ align to longest cache line
	add r2, r1, r2
1:
	ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
	THUMB( ldr r3, [r1] ) @ s/w flush D cache
	THUMB( add r1, r1, r11 )
	teq r1, r2
	bne 1b

	mcr p15, 0, r1, c7, c5, 0 @ flush I cache
	mcr p15, 0, r1, c7, c6, 0 @ flush D cache
	mcr p15, 0, r1, c7, c10, 4 @ drain WB
	mov pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
	tst r4, #1
	movne pc, lr
	mov r1, #0
	mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
	mov pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
	.align 2
	.type phexbuf,#object
phexbuf: .space 12
	.size phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:	adr r3, phexbuf
	mov r2, #0
	strb r2, [r3, r1]
1:	subs r1, r1, #1
	movmi r0, r3
	bmi puts
	and r2, r0, #15
	mov r0, r0, lsr #4
	cmp r2, #10
	addge r2, r2, #7
	add r2, r2, #'0'
	strb r2, [r3, r1]
	b 1b

@ puts corrupts {r0, r1, r2, r3}
puts:	loadsp r3, r2, r1
1:	ldrb r2, [r0], #1
	teq r2, #0
	moveq pc, lr
2:	writeb r2, r3, r1
	mov r1, #0x00020000
3:	subs r1, r1, #1
	bne 3b
	teq r2, #'\n'
	moveq r2, #'\r'
	beq 2b
	teq r0, #0
	bne 1b
	mov pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
	mov r2, r0
	loadsp r3, r1, r0
	mov r0, #0
	b 2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump: mov r12, r0
	mov r10, lr
	mov r11, #0
2:	mov r0, r11, lsl #2
	add r0, r0, r12
	mov r1, #8
	bl phex
	mov r0, #':'
	bl putc
1:	mov r0, #' '
	bl putc
	ldr r0, [r12, r11, lsl #2]
	mov r1, #8
	bl phex
	and r0, r11, #7
	teq r0, #3
	moveq r0, #' '
	bleq putc
	and r0, r11, #7
	add r11, r11, #1
	teq r0, #7
	bne 1b
	mov r0, #'\n'
	bl putc
	cmp r11, #64
	blt 2b
	mov pc, r10
#endif

	.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
	W(b) . @ reset
	W(b) . @ undef
#ifdef CONFIG_EFI_STUB
	W(b) __enter_kernel_from_hyp @ hvc from HYP
#else
	W(b) . @ svc
#endif
	W(b) . @ pabort
	W(b) . @ dabort
	W(b) __enter_kernel @ hyp
	W(b) . @ irq
	W(b) . @ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
	mov r0, #0 @ must be 0
	mov r1, r7 @ restore architecture number
	mov r2, r8 @ restore atags pointer
	ARM( mov pc, r4 ) @ call kernel
	M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
	THUMB( bx r4 ) @ entry point is always ARM for A/R classes

reloc_code_end:

#ifdef CONFIG_EFI_STUB
__enter_kernel_from_hyp:
	mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR
	bic r0, r0, #0x5 @ disable MMU and caches
	mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR
	isb
	b __enter_kernel

ENTRY(efi_enter_kernel)
	mov r4, r0 @ preserve image base
	mov r8, r1 @ preserve DT pointer

	ARM( adrl r0, call_cache_fn )
	THUMB( adr r0, call_cache_fn )
	adr r1, 0f @ clean the region of code we
	bl cache_clean_flush @ may run with the MMU off

#ifdef CONFIG_ARM_VIRT_EXT
	@
	@ The EFI spec does not support booting on ARM in HYP mode,
	@ since it mandates that the MMU and caches are on, with all
	@ 32-bit addressable DRAM mapped 1:1 using short descriptors.
	@
	@ While the EDK2 reference implementation adheres to this,
	@ U-Boot might decide to enter the EFI stub in HYP mode
	@ anyway, with the MMU and caches either on or off.
	@
	mrs r0, cpsr @ get the current mode
	msr spsr_cxsf, r0 @ record boot mode
	and r0, r0, #MODE_MASK @ are we running in HYP mode?
	cmp r0, #HYP_MODE
	bne .Lefi_svc

	mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR
	tst r1, #0x1 @ MMU enabled at HYP?
	beq 1f

	@
	@ When running in HYP mode with the caches on, we're better
	@ off just carrying on using the cached 1:1 mapping that the
	@ firmware provided. Set up the HYP vectors so HVC instructions
	@ issued from HYP mode take us to the correct handler code. We
	@ will disable the MMU before jumping to the kernel proper.
	@
	adr r0, __hyp_reentry_vectors
	mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
	isb
	b .Lefi_hyp

	@
	@ When running in HYP mode with the caches off, we need to drop
	@ into SVC mode now, and let the decompressor set up its cached
	@ 1:1 mapping as usual.
	@
1:	mov r9, r4 @ preserve image base
	bl __hyp_stub_install @ install HYP stub vectors
	safe_svcmode_maskall r1 @ drop to SVC mode
	msr spsr_cxsf, r0 @ record boot mode
	orr r4, r9, #1 @ restore image base and set LSB
	b .Lefi_hyp
.Lefi_svc:
#endif
	mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
	tst r0, #0x1 @ MMU enabled?
	orreq r4, r4, #1 @ set LSB if not

.Lefi_hyp:
	mov r0, r8 @ DT start
	add r1, r8, r2 @ DT end
	bl cache_clean_flush

	adr r0, 0f @ switch to our stack
	ldr sp, [r0]
	add sp, sp, r0

	mov r5, #0 @ appended DTB size
	mov r7, #0xFFFFFFFF @ machine ID
	b wont_overwrite
ENDPROC(efi_enter_kernel)
0:	.long .L_user_stack_end - .
#endif

	.align
	.section ".stack", "aw", %nobits
.L_user_stack:	.space 4096
.L_user_stack_end: