/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
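/*
 * The endian_test union puts 'l' in byte 0 and 'b' in byte 3 of a word.
 * Casting the word to char keeps its least-significant byte, which is c[0]
 * ('l') on a little-endian build and c[3] ('b') on a big-endian one, so
 * ENDIANNESS is the letter appended to the utsname machine and ELF
 * platform strings in setup_processor().
 */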
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

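/*
 * Decode the CPU architecture from the main ID register.  Older cores
 * encode it directly in bits [19:16] (with special cases for pre-ARM7 and
 * ARM7 parts); a value of 0xf there means the new CPUID scheme is in use,
 * so ID_MMFR0 is consulted to tell VMSAv7/PMSAv7 (ARMv7) apart from ARMv6.
 */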
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) == 0x00000003 ||
                    (mmfr0 & 0x000000f0) == 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

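/*
 * A virtually indexed I-cache can alias when one way spans more than a
 * page (way size = line size * number of sets > PAGE_SIZE).  On ARMv7 the
 * geometry is read from CCSIDR after selecting the L1 I-cache via CSSELR;
 * on ARMv6 the cache type register carries an aliasing bit for the
 * I-cache directly.
 */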
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

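/*
 * Derive the global cacheid flags from the cache type register.  A value
 * of 0b100 in CTR[31:29] indicates the ARMv7 register layout (VIPT
 * non-aliasing data cache, with CTR[15:14] describing the I-cache policy);
 * otherwise the ARMv6 layout is assumed, where bit 23 flags an aliasing
 * VIPT data cache.  Pre-v6 cores are simply treated as VIVT.
 */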
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                        else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                } else if (cachetype & (1 << 23)) {
                        cacheid = CACHEID_VIPT_ALIASING;
                } else {
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                }
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

static void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static struct machine_desc * __init lookup_machine_type(unsigned int type)
{
        extern struct machine_desc __arch_info_begin[], __arch_info_end[];
        struct machine_desc *p;

        for (p = __arch_info_begin; p < __arch_info_end; p++)
                if (type == p->nr)
                        return p;

        early_print("\n"
                "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
                "Available machine support:\n\nID (hex)\tNAME\n", type);

        for (p = __arch_info_begin; p < __arch_info_end; p++)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

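        /*
         * The asm below briefly switches into IRQ, ABT and UND modes (with
         * IRQs and FIQs masked) purely to load each mode's banked stack
         * pointer with a slot of this CPU's struct stack, then drops back
         * into SVC mode; r14 is clobbered as the scratch register.
         */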
        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at %#lx\n", start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size = size & PAGE_MASK;

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
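/*
 * For example, "mem=64M@0x80000000" (illustrative values) registers a
 * single 64MB bank starting at physical 0x80000000; the option may be
 * given more than once, adding one bank each time.
 */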
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start = virt_to_phys(_text);
        kernel_code.end = virt_to_phys(_etext - 1);
        kernel_data.start = virt_to_phys(_sdata);
        kernel_data.end = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines = 30,
        .orig_video_cols = 80,
        .orig_video_mode = 0,
        .orig_video_ega_bx = 0,
        .orig_video_isVGA = 1,
        .orig_video_points = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x = tag->u.videotext.x;
        screen_info.orig_y = tag->u.videotext.y;
        screen_info.orig_video_page = tag->u.videotext.video_page;
        screen_info.orig_video_mode = tag->u.videotext.video_mode;
        screen_info.orig_video_cols = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
        pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
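/*
 * The default tag list describes a single MEM_SIZE bank whose start is
 * filled in with PHYS_OFFSET at the top of setup_arch(); it is used when
 * the boot loader does not hand over a valid ATAG list of its own.
 */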
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core core;
        struct tag_header hdr2;
        struct tag_mem32 mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE },
        { 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

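/*
 * The command-line syntax accepted by parse_crashkernel() is typically
 * "crashkernel=size[KMG][@offset[KMG]]", e.g. "crashkernel=64M@32M"
 * (illustrative values).
 */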
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}

void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        init_tags.mem.start = PHYS_OFFSET;

        unwind_init();

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

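        /*
         * The ATAG list pointer handed over by the boot loader in r2 (and
         * saved into __atags_pointer by the early assembly boot code) takes
         * precedence over the machine record's default boot_params address.
         */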
        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        cpu_init();
        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

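/*
 * Human-readable HWCAP names, indexed by bit position: c_show() below
 * walks elf_hwcap bit by bit, so this table has to stay in step with the
 * HWCAP_* definitions in <asm/hwcap.h>.
 */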
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = c_show
};