]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/arm/kernel/setup.c
ARM: 7862/1: pcpu: replace __get_cpu_var_uses
[mirror_ubuntu-artful-kernel.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
883a106b 21#include <linux/of_platform.h>
1da177e4 22#include <linux/init.h>
3c57fb43 23#include <linux/kexec.h>
93c02ab4 24#include <linux/of_fdt.h>
1da177e4
LT
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
7bbb7940 27#include <linux/smp.h>
e119bfff 28#include <linux/proc_fs.h>
2778f620 29#include <linux/memblock.h>
2ecccf90
DM
30#include <linux/bug.h>
31#include <linux/compiler.h>
27a3f0e9 32#include <linux/sort.h>
1da177e4 33
b86040a5 34#include <asm/unified.h>
15d07dc9 35#include <asm/cp15.h>
1da177e4 36#include <asm/cpu.h>
0ba8b9b2 37#include <asm/cputype.h>
1da177e4 38#include <asm/elf.h>
1da177e4 39#include <asm/procinfo.h>
05774088 40#include <asm/psci.h>
37efe642 41#include <asm/sections.h>
1da177e4 42#include <asm/setup.h>
f00ec48f 43#include <asm/smp_plat.h>
1da177e4
LT
44#include <asm/mach-types.h>
45#include <asm/cacheflush.h>
46097c7d 46#include <asm/cachetype.h>
1da177e4
LT
47#include <asm/tlbflush.h>
48
93c02ab4 49#include <asm/prom.h>
1da177e4
LT
50#include <asm/mach/arch.h>
51#include <asm/mach/irq.h>
52#include <asm/mach/time.h>
9f97da78
DH
53#include <asm/system_info.h>
54#include <asm/system_misc.h>
5cbad0eb 55#include <asm/traps.h>
bff595c1 56#include <asm/unwind.h>
1c16d242 57#include <asm/memblock.h>
4588c34d 58#include <asm/virt.h>
1da177e4 59
4cd9d6f7 60#include "atags.h"
0fc1c832 61
1da177e4
LT
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Buffer for the "fpe=" boot argument, consumed by the FPE emulator. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel command line option.
 *
 * NOTE(review): copies exactly 8 bytes regardless of the argument's
 * length, so fpe_type is not guaranteed to be NUL-terminated and may
 * include trailing bytes from the command line — presumably consumers
 * treat it as a fixed-width field; verify before changing.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
ff69a4c8 75extern void paging_init(const struct machine_desc *desc);
0371d3f7 76extern void sanity_check_meminfo(void);
16d6d5b0 77extern enum reboot_mode reboot_mode;
ff69a4c8 78extern void setup_dma_zone(const struct machine_desc *desc);
1da177e4
LT
79
80unsigned int processor_id;
c18f6581 81EXPORT_SYMBOL(processor_id);
0385ebc0 82unsigned int __machine_arch_type __read_mostly;
1da177e4 83EXPORT_SYMBOL(__machine_arch_type);
0385ebc0 84unsigned int cacheid __read_mostly;
c0e95878 85EXPORT_SYMBOL(cacheid);
1da177e4 86
9d20fdd5
BG
87unsigned int __atags_pointer __initdata;
88
1da177e4
LT
89unsigned int system_rev;
90EXPORT_SYMBOL(system_rev);
91
92unsigned int system_serial_low;
93EXPORT_SYMBOL(system_serial_low);
94
95unsigned int system_serial_high;
96EXPORT_SYMBOL(system_serial_high);
97
0385ebc0 98unsigned int elf_hwcap __read_mostly;
1da177e4
LT
99EXPORT_SYMBOL(elf_hwcap);
100
101
102#ifdef MULTI_CPU
0385ebc0 103struct processor processor __read_mostly;
1da177e4
LT
104#endif
105#ifdef MULTI_TLB
0385ebc0 106struct cpu_tlb_fns cpu_tlb __read_mostly;
1da177e4
LT
107#endif
108#ifdef MULTI_USER
0385ebc0 109struct cpu_user_fns cpu_user __read_mostly;
1da177e4
LT
110#endif
111#ifdef MULTI_CACHE
0385ebc0 112struct cpu_cache_fns cpu_cache __read_mostly;
1da177e4 113#endif
953233dc 114#ifdef CONFIG_OUTER_CACHE
0385ebc0 115struct outer_cache_fns outer_cache __read_mostly;
6c09f09d 116EXPORT_SYMBOL(outer_cache);
953233dc 117#endif
1da177e4 118
2ecccf90
DM
119/*
120 * Cached cpu_architecture() result for use by assembler code.
121 * C code should use the cpu_architecture() function instead of accessing this
122 * variable directly.
123 */
124int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
125
ccea7a19
RK
126struct stack {
127 u32 irq[3];
128 u32 abt[3];
129 u32 und[3];
130} ____cacheline_aligned;
131
55bdd694 132#ifndef CONFIG_CPU_V7M
ccea7a19 133static struct stack stacks[NR_CPUS];
55bdd694 134#endif
ccea7a19 135
1da177e4
LT
136char elf_platform[ELF_PLATFORM_SIZE];
137EXPORT_SYMBOL(elf_platform);
138
1da177e4
LT
139static const char *cpu_name;
140static const char *machine_name;
48ab7e09 141static char __initdata cmd_line[COMMAND_LINE_SIZE];
ff69a4c8 142const struct machine_desc *machine_desc __initdata;
1da177e4 143
1da177e4
LT
144static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
145#define ENDIANNESS ((char)endian_test.l)
146
147DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
148
/*
 * Standard memory resources
 *
 * Placeholder entries registered under /proc/iomem; the start/end of
 * "Kernel code"/"Kernel data" are filled in by
 * request_standard_resources(), and "Video RAM" only if the machine
 * descriptor provides a video region.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[] used by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy PC-style parallel port I/O ranges, reserved only when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[] used by request_standard_resources(). */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
201
1da177e4
LT
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); used for the boot banner and
 * /proc/cpuinfo's "CPU architecture" line.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
221
55bdd694
CM
#ifdef CONFIG_CPU_V7M
/* ARMv7-M (microcontroller) builds support exactly one architecture. */
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
/*
 * Decode the CPU architecture from the MIDR (main ID register).
 *
 * Called once from setup_processor(); the result is cached in
 * __cpu_architecture and thereafter read via cpu_architecture().
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 style ID register: not decodable */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes 4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* post-ARM7: architecture encoded directly in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
1da177e4 261
2ecccf90
DM
/*
 * Return the cached CPU architecture (CPU_ARCH_*).
 *
 * BUGs if called before setup_processor() has initialised
 * __cpu_architecture, so callers can rely on a valid value.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
268
8925ec4c
WD
/*
 * Determine whether the instruction cache can alias: i.e. whether one
 * physical line may live at several virtual indexes, which forces extra
 * maintenance in the cache-flushing code.
 *
 * @arch selects how the cache ID registers are formatted (v6 vs v7).
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache in CSSELR, then read its geometry
		 * from CCSIDR; the isb() orders the select before the read. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* A way larger than a page means virtual index bits above
		 * PAGE_SHIFT are used, so aliases are possible. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
301
c0e95878
RK
/*
 * Classify the CPU's caches (VIVT / VIPT aliasing / VIPT non-aliasing /
 * PIPT / ASID-tagged) into the global 'cacheid' bitmask, and print the
 * result.  Called once from setup_processor().
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		/* v7-M has no cache ID scheme to decode here */
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field (bits [15:14]) gives the I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			/* v6 format: bit 23 flags an aliasing D-cache */
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* pre-v6 cores are VIVT */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
345
1da177e4
LT
346/*
347 * These functions re-use the assembly code in head.S, which
348 * already provide the required functionality.
349 */
0f44ba1d 350extern struct proc_info_list *lookup_processor_type(unsigned int);
6fc31d54 351
/*
 * printf-style output usable very early in boot, before the console is
 * registered: the message goes to the low-level debug UART (printascii,
 * when CONFIG_DEBUG_LL) and is also fed to printk so it lands in the
 * log buffer.  Output is truncated to 256 bytes.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
367
8164f7af
SB
/*
 * Add hwcap bits that must be probed from the CPUID extension
 * registers rather than taken from the proc_info table: hardware
 * integer divide (v7 ISAR0) and LPAE (MMFR0 VMSA level).
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	/* The extension registers decoded below only exist from v7 on. */
	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		/* ARM-state divide implies Thumb-state divide too */
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}
389
f159f4ed
TL
/*
 * Work around an ARM1136 erratum in the advertised feature set:
 * clear HWCAP_TLS on 1136 r0 parts, where the TLS register is not
 * usable.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only ARM Ltd. (0x41) ARM11 (0x7) parts are affected. */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
404
ccea7a19
RK
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 *
 * Switches through IRQ, ABT and UND processor modes in turn, pointing
 * each mode's banked SP at the matching entry of this CPU's 'stacks'
 * slot, and returns in SVC mode with interrupts masked.  notrace:
 * runs too early / in too fragile a context for the function tracer.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
465
/* Logical-to-physical CPU map; entries start out invalid. */
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/*
 * Establish the boot CPU as logical CPU 0.
 *
 * Reads the boot CPU's MPIDR affinity-0 field and builds an initial
 * logical map in which the booting core is logical 0 and the core that
 * would otherwise have been logical 0 takes its place.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
487
8cf72172
LP
/* Shift/mask description used to hash MPIDR values to linear indices. */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/* Flush so early secondary cores / resume paths see a coherent copy. */
	sync_cache_w(&mpidr_hash);
}
#endif
553
b69874e4
RK
/*
 * Identify the boot CPU and wire up the per-CPU-type function tables.
 *
 * Looks the MIDR up in the linker-built proc_info table, caches the
 * architecture, copies the MULTI_* method tables, derives elf_hwcap
 * and the cache type, then runs cpu_init() for the boot CPU.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS yields 'l' or 'b' depending on runtime byte order. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
609
/*
 * Print every machine type this kernel supports and halt.  Called when
 * the boot loader supplied a machine ID / DT we cannot match; never
 * returns.
 */
void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
623
/*
 * Register a physical memory region as the next meminfo bank.
 *
 * @start: physical start address (rounded up to a page boundary)
 * @size:  region size in bytes (rounded down accordingly)
 *
 * Returns 0 on success, -EINVAL if the bank table is full or the
 * region rounds down to nothing.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	/* Without LPAE, bank->start + size must fit in 32 bits. */
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
666
1da177e4
LT
/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 *
 * The first "mem=" argument discards any banks discovered so far
 * (e.g. from ATAGs/DT), so the user's layout fully replaces the
 * firmware-provided one; subsequent "mem=" arguments append banks.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* Default the start to PHYS_OFFSET unless "@start" is given. */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
1da177e4 698
/*
 * Populate /proc/iomem and /proc/ioport: register every memblock
 * region as "System RAM", nest the kernel code/data resources inside
 * the region that contains them, and claim the optional video RAM and
 * legacy parallel-port ranges described by the machine descriptor.
 */
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest code/data under the RAM region that holds them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
743
1da177e4
LT
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 VGA text-mode description for the console layer. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
1da177e4 754
1da177e4
LT
/*
 * arch_initcall: give the machine descriptor a chance to register its
 * platform devices, or fall back to populating them from the DT.
 */
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
773
90de4137
SG
/*
 * late_initcall: run the machine descriptor's optional init_late hook
 * once the bulk of kernel initialisation has completed.
 */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
781
3c57fb43
MW
782#ifdef CONFIG_KEXEC
783static inline unsigned long long get_total_mem(void)
784{
785 unsigned long total;
786
787 total = max_low_pfn - min_low_pfn;
788 return total << PAGE_SHIFT;
789}
790
791/**
792 * reserve_crashkernel() - reserves memory are for crash kernel
793 *
794 * This function reserves memory area given in "crashkernel=" kernel command
795 * line parameter. The memory reserved is used by a dump capture kernel when
796 * primary kernel is crashing.
797 */
798static void __init reserve_crashkernel(void)
799{
800 unsigned long long crash_size, crash_base;
801 unsigned long long total_mem;
802 int ret;
803
804 total_mem = get_total_mem();
805 ret = parse_crashkernel(boot_command_line, total_mem,
806 &crash_size, &crash_base);
807 if (ret)
808 return;
809
810 ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
811 if (ret < 0) {
812 printk(KERN_WARNING "crashkernel reservation failed - "
813 "memory is in use (0x%lx)\n", (unsigned long)crash_base);
814 return;
815 }
816
817 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
818 "for crashkernel (System RAM: %ldMB)\n",
819 (unsigned long)(crash_size >> 20),
820 (unsigned long)(crash_base >> 20),
821 (unsigned long)(total_mem >> 20));
822
823 crashk_res.start = crash_base;
824 crashk_res.end = crash_base + crash_size - 1;
825 insert_resource(&iomem_resource, &crashk_res);
826}
827#else
828static inline void reserve_crashkernel(void) {}
829#endif /* CONFIG_KEXEC */
830
27a3f0e9
NP
831static int __init meminfo_cmp(const void *_a, const void *_b)
832{
833 const struct membank *a = _a, *b = _b;
834 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
835 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
836}
6291319d 837
4588c34d
DM
/*
 * Report which processor mode the CPUs entered the kernel in (HYP vs
 * SVC), and warn when they disagree — a symptom of broken boot
 * firmware.  Compiled to nothing without CONFIG_ARM_VIRT_EXT.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	/* Make sure secondary CPUs' recorded boot modes are visible. */
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
854
6291319d
GL
/*
 * Main ARM architecture boot entry: identify the CPU and machine (DT
 * first, ATAGs as fallback), parse early parameters, set up memory
 * (memblock + paging) and resources, prepare SMP, and run the machine
 * descriptor's early hooks.  The statement order here is boot-critical.
 */
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAGs/machine ID. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before sanity checks / memblock setup. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		/* The platform's smp_init() may install its own SMP ops;
		 * otherwise prefer PSCI, then the machine descriptor's. */
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	/* On SMP this runs later, once secondaries have reported in. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
929
930
/*
 * subsys_initcall: register every possible CPU with the sysfs CPU
 * subsystem, marking each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
944
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/*
 * fs_initcall: create the empty /proc/cpu directory; other code
 * (when CONFIG_HAVE_PROC_CPU is set) populates entries beneath it.
 */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
957
1da177e4
LT
/*
 * Names for the HWCAP_* bits, in bit order: entry j is printed in
 * /proc/cpuinfo's "Features" line when elf_hwcap bit j is set.
 * NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	NULL
};
982
1da177e4
LT
/*
 * seq_file show callback for /proc/cpuinfo: one block per online CPU
 * (processor id, model, hwcap features, decoded MIDR fields) followed
 * by the machine-wide hardware/revision/serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		/* On UP there is no per-cpu snapshot; read the live MIDR. */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1036
/*
 * seq_file iterator for /proc/cpuinfo: a single-element sequence —
 * c_show() itself walks all online CPUs, so start yields one dummy
 * token at position 0 and next always ends the iteration.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

/* Wires the iterator above into the /proc/cpuinfo seq_file machinery. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};