]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/arm/kernel/head.S
Merge master.kernel.org:/home/rmk/linux-2.6-serial
[mirror_ubuntu-bionic-kernel.git] / arch / arm / kernel / head.S
1 /*
2 * linux/arch/arm/kernel/head.S
3 *
4 * Copyright (C) 1994-2002 Russell King
5 * Copyright (c) 2003 ARM Limited
6 * All Rights Reserved
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Kernel startup code for all 32-bit CPUs
13 */
14 #include <linux/config.h>
15 #include <linux/linkage.h>
16 #include <linux/init.h>
17
18 #include <asm/assembler.h>
19 #include <asm/domain.h>
20 #include <asm/procinfo.h>
21 #include <asm/ptrace.h>
22 #include <asm/asm-offsets.h>
23 #include <asm/memory.h>
24 #include <asm/thread_info.h>
25 #include <asm/system.h>
26
/*
 * Hand-maintained byte offsets into struct proc_info_list
 * (<asm/procinfo.h>).  NOTE(review): must be kept in sync with the
 * C structure by hand -- confirm against the header if it changes.
 */
27 #define PROCINFO_MMUFLAGS 8
28 #define PROCINFO_INITFUNC 12
29
/*
 * Hand-maintained byte offsets into struct machine_desc.
 * NOTE(review): must be kept in sync with the C structure by hand.
 */
30 #define MACHINFO_TYPE 0
31 #define MACHINFO_PHYSRAM 4
32 #define MACHINFO_PHYSIO 8
33 #define MACHINFO_PGOFFIO 12
34 #define MACHINFO_NAME 16
35
36 /*
37 * swapper_pg_dir is the virtual address of the initial page table.
38 * We place the page tables 16K below KERNEL_RAM_ADDR. Therefore, we must
39 * make sure that KERNEL_RAM_ADDR is correctly set. Currently, we expect
40 * the least significant 16 bits to be 0x8000, but we could probably
41 * relax this restriction to KERNEL_RAM_ADDR >= PAGE_OFFSET + 0x4000.
42 */
43 #if (KERNEL_RAM_ADDR & 0xffff) != 0x8000
44 #error KERNEL_RAM_ADDR must start at 0xXXXX8000
45 #endif
46
@ Virtual address of the 16K level-1 page table, placed 0x4000 below
@ the kernel image (guaranteed to exist by the #if check above).
47 .globl swapper_pg_dir
48 .equ swapper_pg_dir, KERNEL_RAM_ADDR - 0x4000
49
@ pgtbl: load \rd with the *physical* address of swapper_pg_dir,
@ usable while the MMU is still off (uses a literal-pool constant).
50 .macro pgtbl, rd
51 ldr \rd, =(__virt_to_phys(KERNEL_RAM_ADDR - 0x4000))
52 .endm
@ TEXTADDR: virtual address the kernel text is linked at.  XIP kernels
@ execute straight from flash, so they use the XIP mapping instead.
53
54 #ifdef CONFIG_XIP_KERNEL
55 #define TEXTADDR XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
56 #else
57 #define TEXTADDR KERNEL_RAM_ADDR
58 #endif
59
60 /*
61 * Kernel startup entry point.
62 * ---------------------------
63 *
64 * This is normally called from the decompressor code. The requirements
65 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
66 * r1 = machine nr.
67 *
68 * This code is mostly position independent, so if you link the kernel at
69 * 0xc0008000, you call this at __pa(0xc0008000).
70 *
71 * See linux/arch/arm/tools/mach-types for the complete list of machine
72 * numbers for r1.
73 *
74 * We're trying to keep crap to a minimum; DO NOT add any machine specific
75 * crap here - that's what the boot loader (or in extreme, well justified
76 * circumstances, zImage) is for.
77 */
78 __INIT
79 .type stext, %function
80 ENTRY(stext)
81 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
82 @ and irqs disabled
83 bl __lookup_processor_type @ r5=procinfo r9=cpuid
84 movs r10, r5 @ invalid processor (r5=0)?
85 beq __error_p @ yes, error 'p'
86 bl __lookup_machine_type @ r5=machinfo
87 movs r8, r5 @ invalid machine (r5=0)?
88 beq __error_a @ yes, error 'a'
@ On return r4 = physical page table address, consumed later by
@ __enable_mmu.
89 bl __create_page_tables
90
91 /*
92 * The following calls CPU specific code in a position independent
93 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
94 * xxx_proc_info structure selected by __lookup_machine_type
95 * above. On return, the CPU will be ready for the MMU to be
96 * turned on, and r0 will hold the CPU control register value.
97 */
98 ldr r13, __switch_data @ address to jump to after
99 @ mmu has been enabled
100 adr lr, __enable_mmu @ return (PIC) address
@ Tail-call the CPU setup function stored at procinfo+PROCINFO_INITFUNC;
@ it "returns" into __enable_mmu via the lr set up above.
101 add pc, r10, #PROCINFO_INITFUNC
102
103 .type __switch_data, %object
@ Data table consumed by __mmap_switched: word 0 is the virtual jump
@ target, the remaining words are loaded by its two ldmia's -- the
@ order here must stay in sync with the register lists there.
104 __switch_data:
105 .long __mmap_switched
106 .long __data_loc @ r4
107 .long __data_start @ r5
108 .long __bss_start @ r6
109 .long _end @ r7
110 .long processor_id @ r4
111 .long __machine_arch_type @ r5
112 .long cr_alignment @ r6
113 .long init_thread_union + THREAD_START_SP @ sp
114
115 /*
116 * The following fragment of code is executed with the MMU on, and uses
117 * absolute addresses; this is not position independent.
118 *
119 * r0 = cp#15 control register
120 * r1 = machine ID
121 * r9 = processor ID
122 */
123 .type __mmap_switched, %function
124 __mmap_switched:
@ Skip word 0 (our own address) and pick up the segment pointers.
125 adr r3, __switch_data + 4
126
127 ldmia r3!, {r4, r5, r6, r7}
@ Copy .data from its load address (__data_loc) to its link address
@ (__data_start) if they differ -- only the XIP case needs this.
128 cmp r4, r5 @ Copy data segment if needed
129 1: cmpne r5, r6
130 ldrne fp, [r4], #4
131 strne fp, [r5], #4
132 bne 1b
133
134 mov fp, #0 @ Clear BSS (and zero fp)
135 1: cmp r6, r7
136 strcc fp, [r6],#4
137 bcc 1b
138
@ Second half of __switch_data: variable addresses and the initial sp.
139 ldmia r3, {r4, r5, r6, sp}
140 str r9, [r4] @ Save processor ID
141 str r1, [r5] @ Save machine type
142 bic r4, r0, #CR_A @ Clear 'A' bit
@ cr_alignment is followed by cr_no_alignment; store both variants.
143 stmia r6, {r0, r4} @ Save control register values
144 b start_kernel
145
146 #if defined(CONFIG_SMP)
147 .type secondary_startup, #function
148 ENTRY(secondary_startup)
149 /*
150 * Common entry point for secondary CPUs.
151 *
152 * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
153 * the processor type - there is no need to check the machine type
154 * as it has already been validated by the primary processor.
155 */
156 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
157 bl __lookup_processor_type
158 movs r10, r5 @ invalid processor?
159 moveq r0, #'p' @ yes, error 'p'
160 beq __error
161
162 /*
163 * Use the page tables supplied from __cpu_up.
164 */
@ __secondary_data[0] holds its own link-time (virtual) address, so
@ r4 - r5 below is the phys-virt offset while the MMU is still off.
165 adr r4, __secondary_data
166 ldmia r4, {r5, r6, r13} @ address to jump to after
167 sub r4, r4, r5 @ mmu has been enabled
168 ldr r4, [r6, r4] @ get secondary_data.pgdir
169 adr lr, __enable_mmu @ return address
@ #12 is PROCINFO_INITFUNC -- NOTE(review): hard-coded here instead of
@ using the macro; keep in sync if the procinfo layout changes.
170 add pc, r10, #12 @ initialise processor
171 @ (return control reg)
172
173 /*
174 * r6 = &secondary_data
175 */
176 ENTRY(__secondary_switched)
177 ldr sp, [r6, #4] @ get secondary_data.stack
178 mov fp, #0
179 b secondary_start_kernel
180
181 .type __secondary_data, %object
182 __secondary_data:
@ Word 0: this table's own virtual address (anchor for the phys-virt
@ offset computation above).
183 .long .
184 .long secondary_data
185 .long __secondary_switched
186 #endif /* defined(CONFIG_SMP) */
187
188
189
190 /*
191 * Setup common bits before finally enabling the MMU. Essentially
192 * this is just loading the page table pointer and domain access
193 * registers.
194 *
195 * Entered with r0 = cp#15 control register value (from the proc
196 * initfunc) and r4 = physical page table address.
197 */
195 .type __enable_mmu, %function
196 __enable_mmu:
197 #ifdef CONFIG_ALIGNMENT_TRAP
198 orr r0, r0, #CR_A
199 #else
200 bic r0, r0, #CR_A
201 #endif
202 #ifdef CONFIG_CPU_DCACHE_DISABLE
203 bic r0, r0, #CR_C
204 #endif
205 #ifdef CONFIG_CPU_BPREDICT_DISABLE
206 bic r0, r0, #CR_Z
207 #endif
208 #ifdef CONFIG_CPU_ICACHE_DISABLE
209 bic r0, r0, #CR_I
210 #endif
@ Boot-time domain setup: everything manager except I/O (client, so
@ I/O mappings still get permission checks).
211 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
212 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
213 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
214 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
215 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
216 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
217 b __turn_mmu_on
218
219 /*
220 * Enable the MMU. This completely changes the structure of the visible
221 * memory space. You will not be able to trace execution through this.
222 * If you have an enquiry about this, *please* check the linux-arm-kernel
223 * mailing list archives BEFORE sending another post to the list.
224 *
225 * r0 = cp#15 control register
226 * r13 = *virtual* address to jump to upon completion
227 *
228 * other registers depend on the function called upon completion
229 */
@ .align 5 keeps this fragment within one cache line so the identity
@ mapping covers it while the MMU switches on.
230 .align 5
231 .type __turn_mmu_on, %function
232 __turn_mmu_on:
@ The mov rN, rN instructions are nops padding around the cp15 write;
@ the mrc of the ID register presumably serializes the control-register
@ update before the jump -- classic ARM MMU-enable idiom.
233 mov r0, r0
234 mcr p15, 0, r0, c1, c0, 0 @ write control reg
235 mrc p15, 0, r3, c0, c0, 0 @ read id reg
236 mov r3, r3
237 mov r3, r3
@ Jump to the *virtual* address supplied in r13 (__switch_data target
@ or __secondary_switched).
238 mov pc, r13
239
240
241
242 /*
243 * Setup the initial page tables. We only setup the barest
244 * amount which are required to get the kernel running, which
245 * generally means mapping in the kernel code.
246 *
247 * r8 = machinfo
248 * r9 = cpuid
249 * r10 = procinfo
250 *
251 * Returns:
252 * r0, r3, r5, r6, r7 corrupted
253 * r4 = physical page table address
254 */
255 .type __create_page_tables, %function
256 __create_page_tables:
257 ldr r5, [r8, #MACHINFO_PHYSRAM] @ physram
258 pgtbl r4 @ page table address
259
260 /*
261 * Clear the 16K level 1 swapper page table
262 */
263 mov r0, r4
264 mov r3, #0
265 add r6, r0, #0x4000
@ Unrolled x4: 16 bytes per iteration.
266 1: str r3, [r0], #4
267 str r3, [r0], #4
268 str r3, [r0], #4
269 str r3, [r0], #4
270 teq r0, r6
271 bne 1b
272
273 ldr r7, [r10, #PROCINFO_MMUFLAGS] @ mmuflags
274
275 /*
276 * Create identity mapping for first MB of kernel to
277 * cater for the MMU enable. This identity mapping
278 * will be removed by paging_init(). We use our current program
279 * counter to determine corresponding section base address.
280 */
281 mov r6, pc, lsr #20 @ start of kernel section
282 orr r3, r7, r6, lsl #20 @ flags + kernel base
283 str r3, [r4, r6, lsl #2] @ identity mapping
284
285 /*
286 * Now setup the pagetables for our kernel direct
287 * mapped region. We round TEXTADDR down to the
288 * nearest megabyte boundary. It is assumed that
289 * the kernel fits within 4 contiguous 1MB sections.
290 */
@ >> 18 converts a section-aligned address to a pgd byte offset
@ ((addr >> 20) entries * 4 bytes each).
291 add r0, r4, #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
292 str r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!
293 add r3, r3, #1 << 20
294 str r3, [r0, #4]! @ KERNEL + 1MB
295 add r3, r3, #1 << 20
296 str r3, [r0, #4]! @ KERNEL + 2MB
297 add r3, r3, #1 << 20
298 str r3, [r0, #4] @ KERNEL + 3MB
299
300 /*
301 * Then map first 1MB of ram in case it contains our boot params.
302 */
303 add r0, r4, #PAGE_OFFSET >> 18
304 orr r6, r5, r7
305 str r6, [r0]
306
307 #ifdef CONFIG_XIP_KERNEL
308 /*
309 * Map some ram to cover our .data and .bss areas.
310 * Mapping 3MB should be plenty.
311 */
@ r4 - r5 = page table offset from start of RAM, in whole megabytes.
312 sub r3, r4, r5
313 mov r3, r3, lsr #20
314 add r0, r0, r3, lsl #2
315 add r6, r6, r3, lsl #20
316 str r6, [r0], #4
317 add r6, r6, #(1 << 20)
318 str r6, [r0], #4
319 add r6, r6, #(1 << 20)
320 str r6, [r0]
321 #endif
322
323 #ifdef CONFIG_DEBUG_LL
324 bic r7, r7, #0x0c @ turn off cacheable
325 @ and bufferable bits
326 /*
327 * Map in IO space for serial debugging.
328 * This allows debug messages to be output
329 * via a serial console before paging_init.
330 */
331 ldr r3, [r8, #MACHINFO_PGOFFIO]
332 add r0, r4, r3
333 rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
334 cmp r3, #0x0800 @ limit to 512MB
335 movhi r3, #0x0800
336 add r6, r0, r3
337 ldr r3, [r8, #MACHINFO_PHYSIO]
338 orr r3, r3, r7
@ Fill consecutive 1MB sections from the machine's physical IO base.
339 1: str r3, [r0], #4
340 add r3, r3, #1 << 20
341 teq r0, r6
342 bne 1b
343 #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
344 /*
345 * If we're using the NetWinder or CATS, we also need to map
346 * in the 16550-type serial port for the debug messages
347 */
348 add r0, r4, #0xff000000 >> 18
349 orr r3, r7, #0x7c000000
350 str r3, [r0]
351 #endif
352 #ifdef CONFIG_ARCH_RPC
353 /*
354 * Map in screen at 0x02000000 & SCREEN2_BASE
355 * Similar reasons here - for debug. This is
356 * only for Acorn RiscPC architectures.
357 */
358 add r0, r4, #0x02000000 >> 18
359 orr r3, r7, #0x02000000
360 str r3, [r0]
361 add r0, r4, #0xd8000000 >> 18
362 str r3, [r0]
363 #endif
364 #endif
365 mov pc, lr
@ Dump the literal pool (holds the pgtbl constant) here, while we are
@ still within ldr-pc range.
366 .ltorg
367
368
369
370 /*
371 * Exception handling. Something went wrong and we can't proceed. We
372 * ought to tell the user, but since we don't have any guarantee that
373 * we're even running on the right architecture, we do virtually nothing.
374 *
375 * If CONFIG_DEBUG_LL is set we try to print out something about the error
376 * and hope for the best (useful if bootloader fails to pass a proper
377 * machine ID for example).
378 */
379
@ Fatal: unknown processor.  Print a message (DEBUG_LL only) then hang
@ in __error.  Falls straight through to __error_a/__error otherwise.
380 .type __error_p, %function
381 __error_p:
382 #ifdef CONFIG_DEBUG_LL
383 adr r0, str_p1
384 bl printascii
385 b __error
386 str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
387 .align
388 #endif
389
@ Fatal: unknown machine ID.  With DEBUG_LL, print the bad ID and then
@ walk the linker-built machine_desc list (via the 3: anchor table)
@ printing every supported ID/name, then hang in __error.
390 .type __error_a, %function
391 __error_a:
392 #ifdef CONFIG_DEBUG_LL
393 mov r4, r1 @ preserve machine ID
394 adr r0, str_a1
395 bl printascii
396 mov r0, r4
397 bl printhex8
398 adr r0, str_a2
399 bl printascii
@ 3f is the anchor table below __lookup_processor_type: word 0 = its
@ own virtual address, then __arch_info_begin/__arch_info_end.
400 adr r3, 3f
401 ldmia r3, {r4, r5, r6} @ get machine desc list
402 sub r4, r3, r4 @ get offset between virt&phys
403 add r5, r5, r4 @ convert virt addresses to
404 add r6, r6, r4 @ physical address space
405 1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
406 bl printhex8
407 mov r0, #'\t'
408 bl printch
409 ldr r0, [r5, #MACHINFO_NAME] @ get machine name
410 add r0, r0, r4
411 bl printascii
412 mov r0, #'\n'
413 bl printch
414 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
415 cmp r5, r6
416 blo 1b
417 adr r0, str_a3
418 bl printascii
419 b __error
420 str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
421 str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
422 str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
423 .align
424 #endif
425
@ Terminal failure point: optionally paint the RiscPC screen red as a
@ last-ditch visual indicator, then spin forever.
426 .type __error, %function
427 __error:
428 #ifdef CONFIG_ARCH_RPC
429 /*
430 * Turn the screen red on an error - RiscPC only.
431 */
432 mov r0, #0x02000000
433 mov r3, #0x11
434 orr r3, r3, r3, lsl #8
435 orr r3, r3, r3, lsl #16
436 str r3, [r0], #4
437 str r3, [r0], #4
438 str r3, [r0], #4
439 str r3, [r0], #4
440 #endif
@ Infinite loop (mov r0, r0 is a nop); nothing more we can safely do.
441 1: mov r0, r0
442 b 1b
443
444
445 /*
446 * Read processor ID register (CP#15, CR0), and look up in the linker-built
447 * supported processor list. Note that we can't use the absolute addresses
448 * for the __proc_info lists since we aren't running with the MMU on
449 * (and therefore, we are not in the correct address space). We have to
450 * calculate the offset.
451 *
452 * Returns:
453 * r3, r4, r6 corrupted
454 * r5 = proc_info pointer in physical address space
455 * r9 = cpuid
456 */
457 .type __lookup_processor_type, %function
458 __lookup_processor_type:
@ ldmda (decrement-after) loads the three words ending at 3f:
@ r5 = __proc_info_begin, r6 = __proc_info_end, r9 = value of "."
@ at 3f (its link-time virtual address).
459 adr r3, 3f
460 ldmda r3, {r5, r6, r9}
461 sub r3, r3, r9 @ get offset between virt&phys
462 add r5, r5, r3 @ convert virt addresses to
463 add r6, r6, r3 @ physical address space
464 mrc p15, 0, r9, c0, c0 @ get processor id
465 1: ldmia r5, {r3, r4} @ value, mask
466 and r4, r4, r9 @ mask wanted bits
467 teq r3, r4
468 beq 2f
469 add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
470 cmp r5, r6
471 blo 1b
472 mov r5, #0 @ unknown processor
473 2: mov pc, lr
474
475 /*
476 * This provides a C-API version of the above function: saves the
477 * registers __lookup_processor_type corrupts and returns the
478 * proc_info pointer (or NULL) in r0 per the ARM C calling convention.
479 */
478 ENTRY(lookup_processor_type)
479 stmfd sp!, {r4 - r6, r9, lr}
480 bl __lookup_processor_type
481 mov r0, r5
482 ldmfd sp!, {r4 - r6, r9, pc}
483
484 /*
485 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
486 * more information about the __proc_info and __arch_info structures.
487 *
488 * Layout matters: __lookup_processor_type reads the two words *below*
489 * label 3 with ldmda, while __lookup_machine_type and __error_a read
490 * the three words *from* label 3 with ldmia.  ".long ." records the
491 * label's link-time address so both can compute the phys-virt offset.
492 */
488 .long __proc_info_begin
489 .long __proc_info_end
490 3: .long .
491 .long __arch_info_begin
492 .long __arch_info_end
493
494 /*
495 * Lookup machine architecture in the linker-build list of architectures.
496 * Note that we can't use the absolute addresses for the __arch_info
497 * lists since we aren't running with the MMU on (and therefore, we are
498 * not in the correct address space). We have to calculate the offset.
499 *
500 * r1 = machine architecture number
501 * Returns:
502 * r3, r4, r6 corrupted
503 * r5 = mach_info pointer in physical address space
504 */
505 .type __lookup_machine_type, %function
506 __lookup_machine_type:
@ Read the anchor table at 3b: r4 = its link-time address, r5/r6 =
@ __arch_info_begin/__arch_info_end (virtual).
507 adr r3, 3b
508 ldmia r3, {r4, r5, r6}
509 sub r3, r3, r4 @ get offset between virt&phys
510 add r5, r5, r3 @ convert virt addresses to
511 add r6, r6, r3 @ physical address space
512 1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
513 teq r3, r1 @ matches loader number?
514 beq 2f @ found
515 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
516 cmp r5, r6
517 blo 1b
518 mov r5, #0 @ unknown machine
519 2: mov pc, lr
520
521 /*
522 * This provides a C-API version of the above function: takes the
523 * machine number in r0 (C arg 0), moves it to r1 as the internal
524 * routine expects, and returns the mach_info pointer (or NULL) in r0.
525 */
524 ENTRY(lookup_machine_type)
525 stmfd sp!, {r4 - r6, lr}
526 mov r1, r0
527 bl __lookup_machine_type
528 mov r0, r5
529 ldmfd sp!, {r4 - r6, pc}