]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/arm/kernel/setup.c
ARM: 7677/1: LPAE: Fix mapping in alloc_init_section for unaligned addresses
[mirror_ubuntu-hirsute-kernel.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
1da177e4 21#include <linux/init.h>
3c57fb43 22#include <linux/kexec.h>
93c02ab4 23#include <linux/of_fdt.h>
1da177e4
LT
24#include <linux/cpu.h>
25#include <linux/interrupt.h>
7bbb7940 26#include <linux/smp.h>
e119bfff 27#include <linux/proc_fs.h>
2778f620 28#include <linux/memblock.h>
2ecccf90
DM
29#include <linux/bug.h>
30#include <linux/compiler.h>
27a3f0e9 31#include <linux/sort.h>
1da177e4 32
b86040a5 33#include <asm/unified.h>
15d07dc9 34#include <asm/cp15.h>
1da177e4 35#include <asm/cpu.h>
0ba8b9b2 36#include <asm/cputype.h>
1da177e4 37#include <asm/elf.h>
1da177e4 38#include <asm/procinfo.h>
37efe642 39#include <asm/sections.h>
1da177e4 40#include <asm/setup.h>
f00ec48f 41#include <asm/smp_plat.h>
1da177e4
LT
42#include <asm/mach-types.h>
43#include <asm/cacheflush.h>
46097c7d 44#include <asm/cachetype.h>
1da177e4
LT
45#include <asm/tlbflush.h>
46
93c02ab4 47#include <asm/prom.h>
1da177e4
LT
48#include <asm/mach/arch.h>
49#include <asm/mach/irq.h>
50#include <asm/mach/time.h>
9f97da78
DH
51#include <asm/system_info.h>
52#include <asm/system_misc.h>
5cbad0eb 53#include <asm/traps.h>
bff595c1 54#include <asm/unwind.h>
1c16d242 55#include <asm/memblock.h>
4588c34d 56#include <asm/virt.h>
1da177e4 57
4cd9d6f7 58#include "atags.h"
bc581770 59#include "tcm.h"
0fc1c832 60
1da177e4
LT
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Name of the FP emulator requested via the "fpe=" command line option. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel command line option.
 *
 * Use a bounded, NUL-aware copy: the previous memcpy() always read a
 * fixed 8 bytes from @line, which reads past the end of a shorter
 * argument.  strncpy() stops at the terminator and zero-pads, keeping
 * the historical "up to 8 significant characters" semantics.
 */
static int __init fpe_setup(char *line)
{
	strncpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
4b5f32ce 74extern void paging_init(struct machine_desc *desc);
0371d3f7 75extern void sanity_check_meminfo(void);
1da177e4 76extern void reboot_setup(char *str);
c7909509 77extern void setup_dma_zone(struct machine_desc *desc);
1da177e4
LT
78
79unsigned int processor_id;
c18f6581 80EXPORT_SYMBOL(processor_id);
0385ebc0 81unsigned int __machine_arch_type __read_mostly;
1da177e4 82EXPORT_SYMBOL(__machine_arch_type);
0385ebc0 83unsigned int cacheid __read_mostly;
c0e95878 84EXPORT_SYMBOL(cacheid);
1da177e4 85
9d20fdd5
BG
86unsigned int __atags_pointer __initdata;
87
1da177e4
LT
88unsigned int system_rev;
89EXPORT_SYMBOL(system_rev);
90
91unsigned int system_serial_low;
92EXPORT_SYMBOL(system_serial_low);
93
94unsigned int system_serial_high;
95EXPORT_SYMBOL(system_serial_high);
96
0385ebc0 97unsigned int elf_hwcap __read_mostly;
1da177e4
LT
98EXPORT_SYMBOL(elf_hwcap);
99
100
101#ifdef MULTI_CPU
0385ebc0 102struct processor processor __read_mostly;
1da177e4
LT
103#endif
104#ifdef MULTI_TLB
0385ebc0 105struct cpu_tlb_fns cpu_tlb __read_mostly;
1da177e4
LT
106#endif
107#ifdef MULTI_USER
0385ebc0 108struct cpu_user_fns cpu_user __read_mostly;
1da177e4
LT
109#endif
110#ifdef MULTI_CACHE
0385ebc0 111struct cpu_cache_fns cpu_cache __read_mostly;
1da177e4 112#endif
953233dc 113#ifdef CONFIG_OUTER_CACHE
0385ebc0 114struct outer_cache_fns outer_cache __read_mostly;
6c09f09d 115EXPORT_SYMBOL(outer_cache);
953233dc 116#endif
1da177e4 117
2ecccf90
DM
118/*
119 * Cached cpu_architecture() result for use by assembler code.
120 * C code should use the cpu_architecture() function instead of accessing this
121 * variable directly.
122 */
123int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
124
ccea7a19
RK
125struct stack {
126 u32 irq[3];
127 u32 abt[3];
128 u32 und[3];
129} ____cacheline_aligned;
130
131static struct stack stacks[NR_CPUS];
132
1da177e4
LT
133char elf_platform[ELF_PLATFORM_SIZE];
134EXPORT_SYMBOL(elf_platform);
135
1da177e4
LT
136static const char *cpu_name;
137static const char *machine_name;
48ab7e09 138static char __initdata cmd_line[COMMAND_LINE_SIZE];
8ff1443c 139struct machine_desc *machine_desc __initdata;
1da177e4 140
1da177e4
LT
141static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
142#define ENDIANNESS ((char)endian_test.l)
143
144DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
145
146/*
147 * Standard memory resources
148 */
149static struct resource mem_res[] = {
740e518e
GKH
150 {
151 .name = "Video RAM",
152 .start = 0,
153 .end = 0,
154 .flags = IORESOURCE_MEM
155 },
156 {
a36d8e5b 157 .name = "Kernel code",
740e518e
GKH
158 .start = 0,
159 .end = 0,
160 .flags = IORESOURCE_MEM
161 },
162 {
163 .name = "Kernel data",
164 .start = 0,
165 .end = 0,
166 .flags = IORESOURCE_MEM
167 }
1da177e4
LT
168};
169
170#define video_ram mem_res[0]
171#define kernel_code mem_res[1]
172#define kernel_data mem_res[2]
173
174static struct resource io_res[] = {
740e518e
GKH
175 {
176 .name = "reserved",
177 .start = 0x3bc,
178 .end = 0x3be,
179 .flags = IORESOURCE_IO | IORESOURCE_BUSY
180 },
181 {
182 .name = "reserved",
183 .start = 0x378,
184 .end = 0x37f,
185 .flags = IORESOURCE_IO | IORESOURCE_BUSY
186 },
187 {
188 .name = "reserved",
189 .start = 0x278,
190 .end = 0x27f,
191 .flags = IORESOURCE_IO | IORESOURCE_BUSY
192 }
1da177e4
LT
193};
194
195#define lp0 io_res[0]
196#define lp1 io_res[1]
197#define lp2 io_res[2]
198
1da177e4
LT
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); used for /proc/cpuinfo and the boot
 * banner.  Indices 10..16 are reserved/unknown encodings.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
218
2ecccf90 219static int __get_cpu_architecture(void)
1da177e4
LT
220{
221 int cpu_arch;
222
0ba8b9b2 223 if ((read_cpuid_id() & 0x0008f000) == 0) {
1da177e4 224 cpu_arch = CPU_ARCH_UNKNOWN;
0ba8b9b2
RK
225 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
226 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
227 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
228 cpu_arch = (read_cpuid_id() >> 16) & 7;
1da177e4
LT
229 if (cpu_arch)
230 cpu_arch += CPU_ARCH_ARMv3;
0ba8b9b2 231 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
180005c4
CM
232 unsigned int mmfr0;
233
234 /* Revised CPUID format. Read the Memory Model Feature
235 * Register 0 and check for VMSAv7 or PMSAv7 */
236 asm("mrc p15, 0, %0, c0, c1, 4"
237 : "=r" (mmfr0));
315cfe78
CM
238 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
239 (mmfr0 & 0x000000f0) >= 0x00000030)
180005c4
CM
240 cpu_arch = CPU_ARCH_ARMv7;
241 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
242 (mmfr0 & 0x000000f0) == 0x00000020)
243 cpu_arch = CPU_ARCH_ARMv6;
244 else
245 cpu_arch = CPU_ARCH_UNKNOWN;
246 } else
247 cpu_arch = CPU_ARCH_UNKNOWN;
1da177e4
LT
248
249 return cpu_arch;
250}
251
2ecccf90
DM
252int __pure cpu_architecture(void)
253{
254 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
255
256 return __cpu_architecture;
257}
258
8925ec4c
WD
259static int cpu_has_aliasing_icache(unsigned int arch)
260{
261 int aliasing_icache;
262 unsigned int id_reg, num_sets, line_size;
263
7f94e9cc
WD
264 /* PIPT caches never alias. */
265 if (icache_is_pipt())
266 return 0;
267
8925ec4c
WD
268 /* arch specifies the register format */
269 switch (arch) {
270 case CPU_ARCH_ARMv7:
5fb31a96
LW
271 asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
272 : /* No output operands */
8925ec4c 273 : "r" (1));
5fb31a96
LW
274 isb();
275 asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
276 : "=r" (id_reg));
8925ec4c
WD
277 line_size = 4 << ((id_reg & 0x7) + 2);
278 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
279 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
280 break;
281 case CPU_ARCH_ARMv6:
282 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
283 break;
284 default:
285 /* I-cache aliases will be handled by D-cache aliasing code */
286 aliasing_icache = 0;
287 }
288
289 return aliasing_icache;
290}
291
c0e95878
RK
292static void __init cacheid_init(void)
293{
294 unsigned int cachetype = read_cpuid_cachetype();
295 unsigned int arch = cpu_architecture();
296
b57ee99f
CM
297 if (arch >= CPU_ARCH_ARMv6) {
298 if ((cachetype & (7 << 29)) == 4 << 29) {
299 /* ARMv7 register format */
72dc53ac 300 arch = CPU_ARCH_ARMv7;
b57ee99f 301 cacheid = CACHEID_VIPT_NONALIASING;
7f94e9cc
WD
302 switch (cachetype & (3 << 14)) {
303 case (1 << 14):
b57ee99f 304 cacheid |= CACHEID_ASID_TAGGED;
7f94e9cc
WD
305 break;
306 case (3 << 14):
307 cacheid |= CACHEID_PIPT;
308 break;
309 }
8925ec4c 310 } else {
72dc53ac
WD
311 arch = CPU_ARCH_ARMv6;
312 if (cachetype & (1 << 23))
313 cacheid = CACHEID_VIPT_ALIASING;
314 else
315 cacheid = CACHEID_VIPT_NONALIASING;
8925ec4c 316 }
72dc53ac
WD
317 if (cpu_has_aliasing_icache(arch))
318 cacheid |= CACHEID_VIPT_I_ALIASING;
c0e95878
RK
319 } else {
320 cacheid = CACHEID_VIVT;
321 }
2b4ae1f1
RK
322
323 printk("CPU: %s data cache, %s instruction cache\n",
324 cache_is_vivt() ? "VIVT" :
325 cache_is_vipt_aliasing() ? "VIPT aliasing" :
7f94e9cc 326 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
2b4ae1f1
RK
327 cache_is_vivt() ? "VIVT" :
328 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
8925ec4c 329 icache_is_vipt_aliasing() ? "VIPT aliasing" :
7f94e9cc 330 icache_is_pipt() ? "PIPT" :
2b4ae1f1 331 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
c0e95878
RK
332}
333
1da177e4
LT
334/*
335 * These functions re-use the assembly code in head.S, which
336 * already provide the required functionality.
337 */
0f44ba1d 338extern struct proc_info_list *lookup_processor_type(unsigned int);
6fc31d54 339
93c02ab4 340void __init early_print(const char *str, ...)
6fc31d54
RK
341{
342 extern void printascii(const char *);
343 char buf[256];
344 va_list ap;
345
346 va_start(ap, str);
347 vsnprintf(buf, sizeof(buf), str, ap);
348 va_end(ap);
349
350#ifdef CONFIG_DEBUG_LL
351 printascii(buf);
352#endif
353 printk("%s", buf);
354}
355
f159f4ed
TL
356static void __init feat_v6_fixup(void)
357{
358 int id = read_cpuid_id();
359
360 if ((id & 0xff0f0000) != 0x41070000)
361 return;
362
363 /*
364 * HWCAP_TLS is available only on 1136 r1p0 and later,
365 * see also kuser_get_tls_init.
366 */
367 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
368 elf_hwcap &= ~HWCAP_TLS;
369}
370
ccea7a19
RK
371/*
372 * cpu_init - initialise one CPU.
373 *
90f1e084 374 * cpu_init sets up the per-CPU stacks.
ccea7a19 375 */
36c5ed23 376void cpu_init(void)
ccea7a19
RK
377{
378 unsigned int cpu = smp_processor_id();
379 struct stack *stk = &stacks[cpu];
380
381 if (cpu >= NR_CPUS) {
382 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
383 BUG();
384 }
385
14318efb
RH
386 /*
387 * This only works on resume and secondary cores. For booting on the
388 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
389 */
390 set_my_cpu_offset(per_cpu_offset(cpu));
391
b69874e4
RK
392 cpu_proc_init();
393
b86040a5
CM
394 /*
395 * Define the placement constraint for the inline asm directive below.
396 * In Thumb-2, msr with an immediate value is not allowed.
397 */
398#ifdef CONFIG_THUMB2_KERNEL
399#define PLC "r"
400#else
401#define PLC "I"
402#endif
403
ccea7a19
RK
404 /*
405 * setup stacks for re-entrant exception handlers
406 */
407 __asm__ (
408 "msr cpsr_c, %1\n\t"
b86040a5
CM
409 "add r14, %0, %2\n\t"
410 "mov sp, r14\n\t"
ccea7a19 411 "msr cpsr_c, %3\n\t"
b86040a5
CM
412 "add r14, %0, %4\n\t"
413 "mov sp, r14\n\t"
ccea7a19 414 "msr cpsr_c, %5\n\t"
b86040a5
CM
415 "add r14, %0, %6\n\t"
416 "mov sp, r14\n\t"
ccea7a19
RK
417 "msr cpsr_c, %7"
418 :
419 : "r" (stk),
b86040a5 420 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
ccea7a19 421 "I" (offsetof(struct stack, irq[0])),
b86040a5 422 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
ccea7a19 423 "I" (offsetof(struct stack, abt[0])),
b86040a5 424 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
ccea7a19 425 "I" (offsetof(struct stack, und[0])),
b86040a5 426 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
aaaa3f9e 427 : "r14");
ccea7a19
RK
428}
429
eb50439b
WD
430int __cpu_logical_map[NR_CPUS];
431
432void __init smp_setup_processor_id(void)
433{
434 int i;
cb8cf4f8
LP
435 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
436 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
eb50439b
WD
437
438 cpu_logical_map(0) = cpu;
cb8cf4f8 439 for (i = 1; i < nr_cpu_ids; ++i)
eb50439b
WD
440 cpu_logical_map(i) = i == cpu ? 0 : i;
441
cb8cf4f8 442 printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
eb50439b
WD
443}
444
b69874e4
RK
445static void __init setup_processor(void)
446{
447 struct proc_info_list *list;
448
449 /*
450 * locate processor in the list of supported processor
451 * types. The linker builds this table for us from the
452 * entries in arch/arm/mm/proc-*.S
453 */
454 list = lookup_processor_type(read_cpuid_id());
455 if (!list) {
456 printk("CPU configuration botched (ID %08x), unable "
457 "to continue.\n", read_cpuid_id());
458 while (1);
459 }
460
461 cpu_name = list->cpu_name;
2ecccf90 462 __cpu_architecture = __get_cpu_architecture();
b69874e4
RK
463
464#ifdef MULTI_CPU
465 processor = *list->proc;
466#endif
467#ifdef MULTI_TLB
468 cpu_tlb = *list->tlb;
469#endif
470#ifdef MULTI_USER
471 cpu_user = *list->user;
472#endif
473#ifdef MULTI_CACHE
474 cpu_cache = *list->cache;
475#endif
476
477 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
478 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
479 proc_arch[cpu_architecture()], cr_alignment);
480
a34dbfb0
WD
481 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
482 list->arch_name, ENDIANNESS);
483 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
484 list->elf_name, ENDIANNESS);
b69874e4
RK
485 elf_hwcap = list->elf_hwcap;
486#ifndef CONFIG_ARM_THUMB
487 elf_hwcap &= ~HWCAP_THUMB;
488#endif
489
490 feat_v6_fixup();
491
492 cacheid_init();
493 cpu_init();
494}
495
93c02ab4 496void __init dump_machine_table(void)
1da177e4 497{
dce72dd0 498 struct machine_desc *p;
1da177e4 499
6291319d
GL
500 early_print("Available machine support:\n\nID (hex)\tNAME\n");
501 for_each_machine_desc(p)
dce72dd0 502 early_print("%08x\t%s\n", p->nr, p->name);
1da177e4 503
dce72dd0 504 early_print("\nPlease check your kernel config and/or bootloader.\n");
1da177e4 505
dce72dd0
NP
506 while (true)
507 /* can't use cpu_relax() here as it may require MMU setup */;
1da177e4
LT
508}
509
a5d5f7da 510int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3a669411 511{
4b5f32ce
NP
512 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
513
514 if (meminfo.nr_banks >= NR_BANKS) {
515 printk(KERN_CRIT "NR_BANKS too low, "
29a38193 516 "ignoring memory at 0x%08llx\n", (long long)start);
4b5f32ce
NP
517 return -EINVAL;
518 }
05f96ef1 519
3a669411
RK
520 /*
521 * Ensure that start/size are aligned to a page boundary.
522 * Size is appropriately rounded down, start is rounded up.
523 */
524 size -= start & ~PAGE_MASK;
05f96ef1 525 bank->start = PAGE_ALIGN(start);
e5ab8580
WD
526
527#ifndef CONFIG_LPAE
528 if (bank->start + size < bank->start) {
529 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
530 "32-bit physical address space\n", (long long)start);
531 /*
532 * To ensure bank->start + bank->size is representable in
533 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
534 * This means we lose a page after masking.
535 */
536 size = ULONG_MAX - bank->start;
537 }
538#endif
539
a5d5f7da 540 bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
4b5f32ce
NP
541
542 /*
543 * Check whether this memory region has non-zero size or
544 * invalid node number.
545 */
be370302 546 if (bank->size == 0)
4b5f32ce
NP
547 return -EINVAL;
548
549 meminfo.nr_banks++;
550 return 0;
3a669411
RK
551}
552
1da177e4
LT
553/*
554 * Pick out the memory size. We look for mem=size@start,
555 * where start and size are "size[KkMm]"
556 */
2b0d8c25 557static int __init early_mem(char *p)
1da177e4
LT
558{
559 static int usermem __initdata = 0;
a5d5f7da 560 phys_addr_t size;
f60892d3 561 phys_addr_t start;
2b0d8c25 562 char *endp;
1da177e4
LT
563
564 /*
565 * If the user specifies memory size, we
566 * blow away any automatically generated
567 * size.
568 */
569 if (usermem == 0) {
570 usermem = 1;
571 meminfo.nr_banks = 0;
572 }
573
574 start = PHYS_OFFSET;
2b0d8c25
JK
575 size = memparse(p, &endp);
576 if (*endp == '@')
577 start = memparse(endp + 1, NULL);
1da177e4 578
1c97b73e 579 arm_add_memory(start, size);
1da177e4 580
2b0d8c25 581 return 0;
1da177e4 582}
2b0d8c25 583early_param("mem", early_mem);
1da177e4 584
11b9369c 585static void __init request_standard_resources(struct machine_desc *mdesc)
1da177e4 586{
11b9369c 587 struct memblock_region *region;
1da177e4 588 struct resource *res;
1da177e4 589
37efe642
RK
590 kernel_code.start = virt_to_phys(_text);
591 kernel_code.end = virt_to_phys(_etext - 1);
842eab40 592 kernel_data.start = virt_to_phys(_sdata);
37efe642 593 kernel_data.end = virt_to_phys(_end - 1);
1da177e4 594
11b9369c 595 for_each_memblock(memory, region) {
1da177e4
LT
596 res = alloc_bootmem_low(sizeof(*res));
597 res->name = "System RAM";
11b9369c
DZ
598 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
599 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
1da177e4
LT
600 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
601
602 request_resource(&iomem_resource, res);
603
604 if (kernel_code.start >= res->start &&
605 kernel_code.end <= res->end)
606 request_resource(res, &kernel_code);
607 if (kernel_data.start >= res->start &&
608 kernel_data.end <= res->end)
609 request_resource(res, &kernel_data);
610 }
611
612 if (mdesc->video_start) {
613 video_ram.start = mdesc->video_start;
614 video_ram.end = mdesc->video_end;
615 request_resource(&iomem_resource, &video_ram);
616 }
617
618 /*
619 * Some machines don't have the possibility of ever
620 * possessing lp0, lp1 or lp2
621 */
622 if (mdesc->reserve_lp0)
623 request_resource(&ioport_resource, &lp0);
624 if (mdesc->reserve_lp1)
625 request_resource(&ioport_resource, &lp1);
626 if (mdesc->reserve_lp2)
627 request_resource(&ioport_resource, &lp2);
628}
629
1da177e4
LT
630#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
631struct screen_info screen_info = {
632 .orig_video_lines = 30,
633 .orig_video_cols = 80,
634 .orig_video_mode = 0,
635 .orig_video_ega_bx = 0,
636 .orig_video_isVGA = 1,
637 .orig_video_points = 8
638};
4394c124 639#endif
1da177e4 640
1da177e4
LT
641static int __init customize_machine(void)
642{
643 /* customizes platform devices, or adds new ones */
8ff1443c
RK
644 if (machine_desc->init_machine)
645 machine_desc->init_machine();
1da177e4
LT
646 return 0;
647}
648arch_initcall(customize_machine);
649
90de4137
SG
650static int __init init_machine_late(void)
651{
652 if (machine_desc->init_late)
653 machine_desc->init_late();
654 return 0;
655}
656late_initcall(init_machine_late);
657
3c57fb43
MW
658#ifdef CONFIG_KEXEC
659static inline unsigned long long get_total_mem(void)
660{
661 unsigned long total;
662
663 total = max_low_pfn - min_low_pfn;
664 return total << PAGE_SHIFT;
665}
666
667/**
668 * reserve_crashkernel() - reserves memory are for crash kernel
669 *
670 * This function reserves memory area given in "crashkernel=" kernel command
671 * line parameter. The memory reserved is used by a dump capture kernel when
672 * primary kernel is crashing.
673 */
674static void __init reserve_crashkernel(void)
675{
676 unsigned long long crash_size, crash_base;
677 unsigned long long total_mem;
678 int ret;
679
680 total_mem = get_total_mem();
681 ret = parse_crashkernel(boot_command_line, total_mem,
682 &crash_size, &crash_base);
683 if (ret)
684 return;
685
686 ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
687 if (ret < 0) {
688 printk(KERN_WARNING "crashkernel reservation failed - "
689 "memory is in use (0x%lx)\n", (unsigned long)crash_base);
690 return;
691 }
692
693 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
694 "for crashkernel (System RAM: %ldMB)\n",
695 (unsigned long)(crash_size >> 20),
696 (unsigned long)(crash_base >> 20),
697 (unsigned long)(total_mem >> 20));
698
699 crashk_res.start = crash_base;
700 crashk_res.end = crash_base + crash_size - 1;
701 insert_resource(&iomem_resource, &crashk_res);
702}
703#else
704static inline void reserve_crashkernel(void) {}
705#endif /* CONFIG_KEXEC */
706
27a3f0e9
NP
707static int __init meminfo_cmp(const void *_a, const void *_b)
708{
709 const struct membank *a = _a, *b = _b;
710 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
711 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
712}
6291319d 713
4588c34d
DM
714void __init hyp_mode_check(void)
715{
716#ifdef CONFIG_ARM_VIRT_EXT
717 if (is_hyp_mode_available()) {
718 pr_info("CPU: All CPU(s) started in HYP mode.\n");
719 pr_info("CPU: Virtualization extensions available.\n");
720 } else if (is_hyp_mode_mismatched()) {
721 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
722 __boot_cpu_mode & MODE_MASK);
723 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
724 } else
725 pr_info("CPU: All CPU(s) started in SVC mode.\n");
726#endif
727}
728
6291319d
GL
729void __init setup_arch(char **cmdline_p)
730{
731 struct machine_desc *mdesc;
732
6291319d 733 setup_processor();
93c02ab4
GL
734 mdesc = setup_machine_fdt(__atags_pointer);
735 if (!mdesc)
b8b499c8 736 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
6291319d
GL
737 machine_desc = mdesc;
738 machine_name = mdesc->name;
739
c7909509
MS
740 setup_dma_zone(mdesc);
741
b44c350d
RK
742 if (mdesc->restart_mode)
743 reboot_setup(&mdesc->restart_mode);
6291319d 744
37efe642
RK
745 init_mm.start_code = (unsigned long) _text;
746 init_mm.end_code = (unsigned long) _etext;
747 init_mm.end_data = (unsigned long) _edata;
748 init_mm.brk = (unsigned long) _end;
1da177e4 749
48ab7e09
JK
750 /* populate cmd_line too for later use, preserving boot_command_line */
751 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
752 *cmdline_p = cmd_line;
2b0d8c25
JK
753
754 parse_early_param();
755
27a3f0e9 756 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
0371d3f7 757 sanity_check_meminfo();
8d717a52 758 arm_memblock_init(&meminfo, mdesc);
2778f620 759
4b5f32ce 760 paging_init(mdesc);
11b9369c 761 request_standard_resources(mdesc);
1da177e4 762
a528721d
RK
763 if (mdesc->restart)
764 arm_pm_restart = mdesc->restart;
765
93c02ab4
GL
766 unflatten_device_tree();
767
5587164e 768 arm_dt_init_cpu_maps();
7bbb7940 769#ifdef CONFIG_SMP
abcee5fb
MZ
770 if (is_smp()) {
771 smp_set_ops(mdesc->smp);
f00ec48f 772 smp_init_cpus();
abcee5fb 773 }
7bbb7940 774#endif
4588c34d
DM
775
776 if (!is_smp())
777 hyp_mode_check();
778
3c57fb43 779 reserve_crashkernel();
7bbb7940 780
bc581770 781 tcm_init();
ccea7a19 782
52108641 783#ifdef CONFIG_MULTI_IRQ_HANDLER
784 handle_arch_irq = mdesc->handle_irq;
785#endif
1da177e4
LT
786
787#ifdef CONFIG_VT
788#if defined(CONFIG_VGA_CONSOLE)
789 conswitchp = &vga_con;
790#elif defined(CONFIG_DUMMY_CONSOLE)
791 conswitchp = &dummy_con;
792#endif
793#endif
dec12e62
RK
794
795 if (mdesc->init_early)
796 mdesc->init_early();
1da177e4
LT
797}
798
799
800static int __init topology_init(void)
801{
802 int cpu;
803
66fb8bd2
RK
804 for_each_possible_cpu(cpu) {
805 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
806 cpuinfo->cpu.hotpluggable = 1;
807 register_cpu(&cpuinfo->cpu, cpu);
808 }
1da177e4
LT
809
810 return 0;
811}
1da177e4
LT
812subsys_initcall(topology_init);
813
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by CPU-specific procfs entries. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
826
1da177e4
LT
/*
 * Names for the HWCAP_* bits, in bit order, printed on the "Features"
 * line of /proc/cpuinfo.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
849
1da177e4
LT
850static int c_show(struct seq_file *m, void *v)
851{
b4b8f770
LP
852 int i, j;
853 u32 cpuid;
1da177e4 854
1da177e4 855 for_each_online_cpu(i) {
15559722
RK
856 /*
857 * glibc reads /proc/cpuinfo to determine the number of
858 * online processors, looking for lines beginning with
859 * "processor". Give glibc what it expects.
860 */
861 seq_printf(m, "processor\t: %d\n", i);
b4b8f770
LP
862 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
863 seq_printf(m, "model name\t: %s rev %d (%s)\n",
864 cpu_name, cpuid & 15, elf_platform);
865
866#if defined(CONFIG_SMP)
867 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1da177e4
LT
868 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
869 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
b4b8f770
LP
870#else
871 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
872 loops_per_jiffy / (500000/HZ),
873 (loops_per_jiffy / (5000/HZ)) % 100);
1da177e4 874#endif
b4b8f770
LP
875 /* dump out the processor features */
876 seq_puts(m, "Features\t: ");
1da177e4 877
b4b8f770
LP
878 for (j = 0; hwcap_str[j]; j++)
879 if (elf_hwcap & (1 << j))
880 seq_printf(m, "%s ", hwcap_str[j]);
1da177e4 881
b4b8f770
LP
882 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
883 seq_printf(m, "CPU architecture: %s\n",
884 proc_arch[cpu_architecture()]);
1da177e4 885
b4b8f770
LP
886 if ((cpuid & 0x0008f000) == 0x00000000) {
887 /* pre-ARM7 */
888 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1da177e4 889 } else {
b4b8f770
LP
890 if ((cpuid & 0x0008f000) == 0x00007000) {
891 /* ARM7 */
892 seq_printf(m, "CPU variant\t: 0x%02x\n",
893 (cpuid >> 16) & 127);
894 } else {
895 /* post-ARM7 */
896 seq_printf(m, "CPU variant\t: 0x%x\n",
897 (cpuid >> 20) & 15);
898 }
899 seq_printf(m, "CPU part\t: 0x%03x\n",
900 (cpuid >> 4) & 0xfff);
1da177e4 901 }
b4b8f770 902 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1da177e4 903 }
1da177e4
LT
904
905 seq_printf(m, "Hardware\t: %s\n", machine_name);
906 seq_printf(m, "Revision\t: %04x\n", system_rev);
907 seq_printf(m, "Serial\t\t: %08x%08x\n",
908 system_serial_high, system_serial_low);
909
910 return 0;
911}
912
913static void *c_start(struct seq_file *m, loff_t *pos)
914{
915 return *pos < 1 ? (void *)1 : NULL;
916}
917
918static void *c_next(struct seq_file *m, void *v, loff_t *pos)
919{
920 ++*pos;
921 return NULL;
922}
923
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
927
2ffd6e18 928const struct seq_operations cpuinfo_op = {
1da177e4
LT
929 .start = c_start,
930 .next = c_next,
931 .stop = c_stop,
932 .show = c_show
933};