/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm
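
	/*
	 * For example, assuming the common PAGE_OFFSET of 0xc0000000 and
	 * TEXT_OFFSET of 0x8000:
	 *   KERNEL_RAM_VADDR = 0xc0000000 + 0x8000 = 0xc0008000
	 *   swapper_pg_dir   = 0xc0008000 - 0x4000 = 0xc0004000 (classic)
	 *                      0xc0008000 - 0x5000 = 0xc0003000 (LPAE)
	 * pgtbl computes the same table address, but relative to the
	 * physical load address passed in \phys.
	 */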

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or, in extreme, well-justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB(	it	lo )			@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif
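
	/*
	 * The word at 2: (see below) holds its own link-time (virtual)
	 * address, while adr yields its run-time (physical) address, so
	 * the subtraction produces the phys-virt delta with the MMU still
	 * off.  For example, a kernel linked at 0xc0008000 but loaded at
	 * 0x80008000 gets r4 = 0xc0000000 (mod 2^32), and r8 =
	 * PAGE_OFFSET + r4 = 0x80000000, i.e. PHYS_OFFSET.
	 */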

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1  - machine type
	 *  r2  - boot data (atags/dt) pointer
	 *  r4  - translation table base (low word)
	 *  r5  - translation table base (high word, if LPAE)
	 *  r8  - translation table base 1 (pfn if LPAE)
	 *  r9  - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, and r1, r2, r4 and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.
	.long	PAGE_OFFSET
#endif

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

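	/*
	 * The clear loop above is unrolled four ways, zeroing 16 bytes
	 * per iteration; both PG_DIR_SIZE values (0x4000 and 0x5000) are
	 * multiples of 16, so the teq terminates exactly on r6.
	 */
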
#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

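	/*
	 * With LPAE, the first level holds only four 8-byte entries, each
	 * covering 1GiB; the four PMD pages at r4 + 0x1000..0x4000 built
	 * above therefore span the entire 32-bit address space.
	 */
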
	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

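	/*
	 * Each pass of the loop above writes the section entry whose
	 * index equals that section's own physical section number, so
	 * virt == phys across __turn_mmu_on.  E.g. with 1MiB sections
	 * (SECTION_SHIFT = 20), code at 0x80100000 lies in section 0x801
	 * and its entry sits at r4 + (0x801 << PMD_ORDER).
	 */
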
	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

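	/*
	 * Note the shift trick used throughout: the table offset of an
	 * address's entry is (addr >> SECTION_SHIFT) << PMD_ORDER, which
	 * folds into the single shift addr >> (SECTION_SHIFT - PMD_ORDER).
	 */
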
#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map the boot params address in r2, if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
	subne	r3, r0, r8
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]
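
	/*
	 * The movs above rounds r2 down to a section boundary and sets
	 * the Z flag if the result is zero, so the whole "ne"-conditional
	 * sequence is skipped when no boot params pointer was supplied.
	 */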

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

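	/*
	 * addruart (provided by the platform's debug include) returns the
	 * UART's physical address in r7 and its virtual address in r3;
	 * the code above installs a single device section entry, marked
	 * execute-never, at the slot chosen by the virtual address.
	 */
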
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages.
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)			@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0	@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5			@ invalid processor?
	moveq	r0, #'p'		@ yes, error 'p'
 THUMB(	it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}	@ address to jump to after
	sub	lr, r4, r5		@ mmu has been enabled
	add	r3, r7, lr
	ldrd	r4, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)		@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)		@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)		@ without using a temp reg.
	ldr	r8, [r3, #8]		@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu	@ return address
	mov	r13, r12		@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10		@ initialise processor
					@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

	/*
	 * r7 = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #12]		@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
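
	/*
	 * The offsets used above assume the layout of struct
	 * secondary_data (see <asm/smp.h>): a 64-bit pgdir at offset 0,
	 * swapper_pg_dir at offset 8 and the idle stack pointer at
	 * offset 12.
	 */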

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */


/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0).
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
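
/*
 * The CR_* masks above are the usual SCTLR bits: A (bit 1, alignment
 * checking), C (bit 2, D-cache), Z (bit 11, branch prediction) and
 * I (bit 12, I-cache).
 */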

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection

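/*
 * The "mov r0, r0" / "mov r3, r3" no-ops and the ID-register read-back
 * act as pipeline padding around the SCTLR write.  This code runs from
 * the identity-mapped .idmap.text section, so execution survives the
 * switch; the jump through r3 = r13 then lands at the *virtual* address
 * supplied by the caller.
 */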

#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SoC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So it's an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r4, r4, r3
	add	r5, r5, r3
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

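/*
 * Each ALT_SMP()/ALT_UP() site records an (address, replacement) pair
 * in the __smpalt table; the loop below walks those pairs and rewrites
 * each SMP instruction with its UP variant in place when running on a
 * uniprocessor.
 */
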
	.text
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	mov	r3, #0
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/*
 * __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand.  The stub instruction
 * has the form '(add|sub) rd, rn, #imm'.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f
	ldmia	r0, {r3-r7}
	mvn	ip, #0
	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3	@ adjust table start address
	add	r5, r5, r3	@ adjust table end address
	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
	add	r7, r7, r3	@ adjust __pv_offset address
	mov	r0, r8, lsr #PAGE_SHIFT	@ convert to PFN
	str	r0, [r6]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
	mov	r6, r3, lsr #24	@ constant for add/sub instructions
	teq	r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(	it	ne		@ cross section branch )
	bne	__error
	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
	b	__fixup_a_pv_table
ENDPROC(__fixup_pv_table)

	.align
1:	.long	.
	.long	__pv_table_begin
	.long	__pv_table_end
2:	.long	__pv_phys_pfn_offset
	.long	__pv_offset

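/*
 * Worked example: with PHYS_OFFSET = 0x80000000 and PAGE_OFFSET =
 * 0xc0000000 the delta is 0xc0000000 (mod 2^32), so r6 above holds
 * 0xc0 and each '(add|sub) rd, rn, #imm' stub is patched to apply
 * #0xc0000000, the 8-bit immediate rotated into bits 31-24.
 */
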
	.text
__fixup_a_pv_table:
	adr	r0, 3f
	ldr	r6, [r0]
	add	r6, r6, r3
	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
	mov	r6, r6, lsr #24
	cmn	r0, #1
#ifdef CONFIG_THUMB2_KERNEL
	moveq	r0, #0x200000		@ set bit 21, mov to mvn instruction
	lsls	r6, #24
	beq	2f
	clz	r7, r6
	lsr	r6, #24
	lsl	r6, r7
	bic	r6, #0x0080
	lsrs	r7, #1
	orrcs	r6, #0x0080
	orr	r6, r6, r7, lsl #12
	orr	r6, #0x4000
	b	2f
1:	add	r7, r3
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x4000
	and	ip, #0x8f00
	orrne	ip, r6			@ mask in offset bits 31-24
	orreq	ip, r0			@ mask in offset bits 7-0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
	bne	2f
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	bic	ip, #0x20
	orr	ip, ip, r0, lsr #16
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4		@ use branch for delay slot
	bcc	1b
	bx	lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
	moveq	r0, #0x00004000		@ set bit 22, mov to mvn instruction
#else
	moveq	r0, #0x400000		@ set bit 22, mov to mvn instruction
#endif
	b	2f
1:	ldr	ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
	@ in BE8, we load data in BE, but instructions still in LE
	bic	ip, ip, #0xff000000
	tst	ip, #0x000f0000		@ check the rotation field
	orrne	ip, ip, r6, lsl #24	@ mask in offset bits 31-24
	biceq	ip, ip, #0x00004000	@ clear bit 22
	orreq	ip, ip, r0		@ mask in offset bits 7-0
#else
	bic	ip, ip, #0x000000ff
	tst	ip, #0xf00		@ check the rotation field
	orrne	ip, ip, r6		@ mask in offset bits 31-24
	biceq	ip, ip, #0x400000	@ clear bit 22
	orreq	ip, ip, r0		@ mask in offset bits 7-0
#endif
	str	ip, [r7, r3]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4		@ use branch for delay slot
	bcc	1b
	ret	lr
#endif
ENDPROC(__fixup_a_pv_table)

	.align
3:	.long __pv_offset

ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r3, #0			@ no offset
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

	.data
	.globl	__pv_phys_pfn_offset
	.type	__pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
	.word	0
	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset

	.globl	__pv_offset
	.type	__pv_offset, %object
__pv_offset:
	.quad	0
	.size	__pv_offset, . -__pv_offset
#endif

#include "head-common.S"