]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/arm/kernel/setup.c
arm/dt: consolidate atags setup into setup_machine_atags
[mirror_ubuntu-hirsute-kernel.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
1da177e4
LT
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
1da177e4 21#include <linux/init.h>
3c57fb43 22#include <linux/kexec.h>
cea0bb1b 23#include <linux/crash_dump.h>
1da177e4
LT
24#include <linux/root_dev.h>
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
7bbb7940 27#include <linux/smp.h>
4e950f6f 28#include <linux/fs.h>
e119bfff 29#include <linux/proc_fs.h>
2778f620 30#include <linux/memblock.h>
1da177e4 31
b86040a5 32#include <asm/unified.h>
1da177e4 33#include <asm/cpu.h>
0ba8b9b2 34#include <asm/cputype.h>
1da177e4 35#include <asm/elf.h>
1da177e4 36#include <asm/procinfo.h>
37efe642 37#include <asm/sections.h>
1da177e4 38#include <asm/setup.h>
f00ec48f 39#include <asm/smp_plat.h>
1da177e4
LT
40#include <asm/mach-types.h>
41#include <asm/cacheflush.h>
46097c7d 42#include <asm/cachetype.h>
1da177e4
LT
43#include <asm/tlbflush.h>
44
45#include <asm/mach/arch.h>
46#include <asm/mach/irq.h>
47#include <asm/mach/time.h>
5cbad0eb 48#include <asm/traps.h>
bff595c1 49#include <asm/unwind.h>
1da177e4 50
73a65b3f 51#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
0fc1c832 52#include "compat.h"
73a65b3f 53#endif
4cd9d6f7 54#include "atags.h"
bc581770 55#include "tcm.h"
0fc1c832 56
1da177e4
LT
57#ifndef MEM_SIZE
58#define MEM_SIZE (16*1024*1024)
59#endif
60
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Value of the "fpe=" boot argument, consumed by the FP emulator. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * NOTE(review): copies a fixed 8 bytes regardless of argument
	 * length — may read past the end of a shorter boot-arg string;
	 * confirm the command-line buffer guarantees 8 readable bytes.
	 */
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72
4b5f32ce 73extern void paging_init(struct machine_desc *desc);
1da177e4 74extern void reboot_setup(char *str);
1da177e4
LT
75
/* Raw CPU ID register value (see read_cpuid_id() users below). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number — presumably from the bootloader; verify in head.S. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* bitmask describing I/D cache behaviour; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the bootloader's ATAG list (0 if none);
 * consumed by setup_machine_tags() via phys_to_virt(). */
unsigned int __atags_pointer __initdata;

/* Board identification, filled from ATAGs and shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware-capability bits advertised to user space. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type operation tables; copied from the matching
 * proc_info_list entry in setup_processor() when the kernel is built
 * with support for multiple CPU/TLB/user/cache implementations.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
1da177e4 114
ccea7a19
RK
/* Minimal per-mode exception stacks (3 words each for IRQ, abort and
 * undefined-instruction modes); one set per CPU, wired up by cpu_init(). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
122
1da177e4
LT
/* Platform string exported to user space (built in setup_processor()). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* from proc_info_list */
static const char *machine_name;	/* from machine_desc */
/* Writable copy of boot_command_line handed to the caller of setup_arch(). */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
/* Low byte of .l aliases c[0] ('l') on little-endian, c[3] ('b') on big. */
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
136
/*
 * Standard memory resources, registered under iomem_resource by
 * request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel-port I/O ranges, claimed only when the board's
 * machine_desc sets the corresponding reserve_lp* flag. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
189
1da177e4
LT
/* Architecture names indexed by the CPU_ARCH_* value returned by
 * cpu_architecture(); used for the printk in setup_processor() and
 * the "CPU architecture" line of /proc/cpuinfo. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
209
1da177e4
LT
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Pre-v7 parts encode it directly in the ID register;
 * parts using the revised CPUID scheme (top nibble 0xF) are classified
 * via the Memory Model Feature Register 0 instead.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes v4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* old-style encoding: architecture field at bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
242
8925ec4c
WD
/*
 * Return non-zero if the L1 instruction cache can alias, i.e. one way
 * spans more than a page so different virtual mappings of the same
 * physical page may hit different cache lines.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 I-cache, then read its size registers */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* one way > page size => virtual index spills into the tag */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
271
c0e95878
RK
/*
 * Classify the CPU's caches (VIVT / VIPT aliasing / VIPT non-aliasing,
 * plus I-cache ASID-tagging or aliasing flags) into the global
 * 'cacheid' bitmask, and report the result.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6 cores are virtually indexed, virtually tagged */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
305
1da177e4
LT
306/*
307 * These functions re-use the assembly code in head.S, which
308 * already provide the required functionality.
309 */
0f44ba1d 310extern struct proc_info_list *lookup_processor_type(unsigned int);
6fc31d54
RK
311
/*
 * printf-style output usable before the console is up: always goes to
 * printk's buffer, and straight to the low-level UART when
 * CONFIG_DEBUG_LL is enabled.  Output is truncated at 256 bytes.
 */
static void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
327
f159f4ed
TL
328static void __init feat_v6_fixup(void)
329{
330 int id = read_cpuid_id();
331
332 if ((id & 0xff0f0000) != 0x41070000)
333 return;
334
335 /*
336 * HWCAP_TLS is available only on 1136 r1p0 and later,
337 * see also kuser_get_tls_init.
338 */
339 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
340 elf_hwcap &= ~HWCAP_TLS;
341}
342
1da177e4
LT
/*
 * Identify the CPU from its ID register, hook up the matching
 * per-CPU-type operation tables (MULTI_* builds), fill in the
 * name/platform/hwcap globals, and run cache and proc init.
 * Hangs forever if the CPU is not in the kernel's processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
390
ccea7a19
RK
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it switches into IRQ, abort and
 * undefined-instruction mode in turn, pointing each mode's sp at the
 * corresponding slot of this CPU's stacks[] entry, then returns to SVC
 * mode.  Interrupts are kept masked throughout.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
441
/*
 * Print every machine this kernel was built to support, then spin
 * forever.  Called when the bootloader-supplied machine number does
 * not match any compiled-in machine_desc; never returns.
 */
static void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
455
/*
 * Record a physical memory region as the next meminfo bank.
 * Start is rounded up and size rounded down to page boundaries.
 * Returns 0 on success, -EINVAL when the bank table is full or the
 * region rounds down to zero pages.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
484
1da177e4
LT
485/*
486 * Pick out the memory size. We look for mem=size@start,
487 * where start and size are "size[KkMm]"
488 */
/*
 * Parse one "mem=size[@start]" early parameter and register the region
 * with arm_add_memory().  start defaults to PHYS_OFFSET when omitted.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;	/* first mem= discards firmware banks */
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
1da177e4
LT
516
517static void __init
518setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
519{
520#ifdef CONFIG_BLK_DEV_RAM
521 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
522
523 rd_image_start = image_start;
524 rd_prompt = prompt;
525 rd_doload = doload;
526
527 if (rd_sz)
528 rd_size = rd_sz;
529#endif
530}
531
/*
 * Register a "System RAM" resource for every memblock memory region,
 * nest the kernel text/data resources inside the region containing
 * them, and claim the board's video RAM and legacy parallel-port
 * ranges when the machine_desc asks for them.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
576
577/*
578 * Tag parsing.
579 *
580 * This is the new way of passing data to the kernel at boot time. Rather
581 * than passing a fixed inflexible structure to the kernel, we pass a list
582 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
583 * tag for the list to be recognised (to distinguish the tagged list from
584 * a param_struct). The list is terminated with a zero-length tag (this tag
585 * is not parsed in any way).
586 */
587static int __init parse_tag_core(const struct tag *tag)
588{
589 if (tag->hdr.size > 2) {
590 if ((tag->u.core.flags & 1) == 0)
591 root_mountflags &= ~MS_RDONLY;
592 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
593 }
594 return 0;
595}
596
597__tagtable(ATAG_CORE, parse_tag_core);
598
599static int __init parse_tag_mem32(const struct tag *tag)
600{
4b5f32ce 601 return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
1da177e4
LT
602}
603
604__tagtable(ATAG_MEM, parse_tag_mem32);
605
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Console defaults; individual fields overridden by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text-mode description. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	const struct tag_videotext *vt = &tag->u.videotext;

	screen_info.orig_x		= vt->x;
	screen_info.orig_y		= vt->y;
	screen_info.orig_video_page	= vt->video_page;
	screen_info.orig_video_mode	= vt->video_mode;
	screen_info.orig_video_cols	= vt->video_cols;
	screen_info.orig_video_ega_bx	= vt->video_ega_bx;
	screen_info.orig_video_lines	= vt->video_lines;
	screen_info.orig_video_isVGA	= vt->video_isvga;
	screen_info.orig_video_points	= vt->video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
632
633static int __init parse_tag_ramdisk(const struct tag *tag)
634{
635 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
636 (tag->u.ramdisk.flags & 2) == 0,
637 tag->u.ramdisk.start, tag->u.ramdisk.size);
638 return 0;
639}
640
641__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
642
1da177e4
LT
643static int __init parse_tag_serialnr(const struct tag *tag)
644{
645 system_serial_low = tag->u.serialnr.low;
646 system_serial_high = tag->u.serialnr.high;
647 return 0;
648}
649
650__tagtable(ATAG_SERIAL, parse_tag_serialnr);
651
/* ATAG_REVISION: board revision, reported via /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
659
/*
 * ATAG_CMDLINE: adopt the bootloader's command line — unless
 * CONFIG_CMDLINE_FORCE pins the built-in one, in which case the tag
 * is ignored with a warning.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
671
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.  Returns non-zero iff a handler was found.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* t stopped short of the end only if the break above fired */
	return t < &__tagtable_end;
}
690
691/*
692 * Parse all tags in the list, checking both the global and architecture
693 * specific tag tables.
694 */
695static void __init parse_tags(const struct tag *t)
696{
697 for (; t->hdr.size; t = tag_next(t))
698 if (!parse_tag(t))
699 printk(KERN_WARNING
700 "Ignoring unrecognised tag 0x%08x\n",
701 t->hdr.tag);
702}
703
/*
 * This holds our defaults: a minimal ATAG list used when the
 * bootloader supplies nothing usable.  Describes one MEM_SIZE bank
 * (its start is patched to PHYS_OFFSET in setup_machine_tags()).
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core core;
	struct tag_header hdr2;
	struct tag_mem32 mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },		/* flags bit0 set => root stays read-only */
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }		/* zero-size terminator */
};
720
1da177e4
LT
721static int __init customize_machine(void)
722{
723 /* customizes platform devices, or adds new ones */
8ff1443c
RK
724 if (machine_desc->init_machine)
725 machine_desc->init_machine();
1da177e4
LT
726 return 0;
727}
728arch_initcall(customize_machine);
729
3c57fb43
MW
730#ifdef CONFIG_KEXEC
731static inline unsigned long long get_total_mem(void)
732{
733 unsigned long total;
734
735 total = max_low_pfn - min_low_pfn;
736 return total << PAGE_SHIFT;
737}
738
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;		/* no (or unparsable) crashkernel= option */

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* publish the region so it shows up in /proc/iomem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
778
73a65b3f
UKK
779static void __init squash_mem_tags(struct tag *tag)
780{
781 for (; tag->hdr.size; tag = tag_next(tag))
782 if (tag->hdr.tag == ATAG_MEM)
783 tag->hdr.tag = ATAG_NONE;
784}
785
/*
 * Select the machine_desc matching the bootloader-supplied machine
 * number 'nr', locate the ATAG list (bootloader pointer, machine
 * default, or the built-in init_tags fallback), give the machine a
 * chance to fix it up, then parse it and seed boot_command_line.
 * Does not return if the machine ID is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* patch the fallback tag list's memory bank to start of RAM */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still not a valid tag list: fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup may have set up memory itself; drop ATAG_MEMs then */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
858
859
/*
 * Top-level ARM boot-time setup: identify the CPU and machine, parse
 * ATAGs and early parameters, bring up the memory subsystem
 * (memblock, paging, resources), then SMP/crashkernel/console/trap
 * setup and the machine's init_early hook.  Call order here is
 * load-bearing; do not reorder casually.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
915
916
/*
 * Register every possible CPU with the sysfs CPU subsystem, marking
 * each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
930
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used for per-CPU entries. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
943
1da177e4
LT
/* Feature names printed on the /proc/cpuinfo "Features" line; index i
 * corresponds to bit i of elf_hwcap (see the loop in c_show()). */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
962
1da177e4
LT
/*
 * Emit the whole /proc/cpuinfo body: processor identification,
 * per-CPU BogoMIPS, feature flags decoded from elf_hwcap, CPUID
 * fields, and board identification from the ATAGs.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1025
1026static void *c_start(struct seq_file *m, loff_t *pos)
1027{
1028 return *pos < 1 ? (void *)1 : NULL;
1029}
1030
1031static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1032{
1033 ++*pos;
1034 return NULL;
1035}
1036
/* Nothing to release — the iterator holds no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
1040
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};