/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
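
/*
 * Usage note (editorial, not in the original source): "fpe=" is a boot
 * parameter, e.g. "fpe=nwfpe"; the string is simply copied verbatim into
 * fpe_type[] here and matched later by the FP emulator initialisation code.
 */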

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
        u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
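
/*
 * Editorial note: the union above overlays a 4-byte string on an unsigned
 * long, so the low-order byte of .l is 'l' on a little-endian CPU and 'b'
 * on a big-endian one; ENDIANNESS therefore evaluates to 'l' or 'b' and is
 * appended to the utsname machine and ELF platform strings below.
 */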

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif
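
/*
 * Worked example (editorial): a Cortex-A9 reports MIDR 0x410fc090, whose
 * architecture field (bits [19:16]) is 0xf, i.e. the revised CPUID scheme.
 * Its MMFR0 VMSA field (bits [3:0]) is >= 3, so the code above classifies
 * it as CPU_ARCH_ARMv7.
 */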

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
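
/*
 * Worked example (editorial, assuming 4 KiB pages): a CCSIDR LineSize field
 * of 1 gives line_size = 4 << 3 = 32 bytes.  With 128 sets that is one way
 * of exactly 4 KiB, so the I-cache does not alias; with 256 sets the way
 * size is 8 KiB > PAGE_SIZE and the VIPT I-cache can alias.
 */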

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        pr_info("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        int block;
        u32 isar5;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
        if (block >= 2)
                elf_hwcap |= HWCAP_IDIVA;
        if (block >= 1)
                elf_hwcap |= HWCAP_IDIVT;

        /* LPAE implies atomic ldrd/strd instructions */
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block >= 5)
                elf_hwcap |= HWCAP_LPAE;

        /* check for supported v8 Crypto instructions */
        isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

        block = cpuid_feature_extract_field(isar5, 4);
        if (block >= 2)
                elf_hwcap2 |= HWCAP2_PMULL;
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_AES;

        block = cpuid_feature_extract_field(isar5, 8);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA1;

        block = cpuid_feature_extract_field(isar5, 12);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA2;

        block = cpuid_feature_extract_field(isar5, 16);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_CRC32;
}
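
/*
 * Editorial note: cpuid_feature_extract() pulls a 4-bit field out of the
 * named CPUID register at the given bit position.  For example, the
 * ID_ISAR0 divide field at bit 24 reads 1 when SDIV/UDIV exist only in the
 * Thumb instruction set (HWCAP_IDIVT) and 2 when they exist in both ARM and
 * Thumb (HWCAP_IDIVA as well), which is exactly how it is tested above.
 */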

static void __init elf_hwcap_fixup(void)
{
        unsigned id = read_cpuid_id();

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
            ((id >> 20) & 3) == 0) {
                elf_hwcap &= ~HWCAP_TLS;
                return;
        }

        /* Verify if CPUID scheme is implemented */
        if ((id & 0x000f0000) != 0x000f0000)
                return;

        /*
         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
         * avoid advertising SWP; it may not be atomic with
         * multiprocessing cores.
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
             cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                pr_crit("CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr cpsr_c, %1\n\t"
        "add r14, %0, %2\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %3\n\t"
        "add r14, %0, %4\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %5\n\t"
        "add r14, %0, %6\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %7\n\t"
        "add r14, %0, %8\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %9"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}
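
/*
 * Editorial note on the asm above: with IRQs and FIQs masked it switches
 * the CPU through IRQ, ABT, UND and FIQ modes in turn, pointing each mode's
 * banked sp at the matching three-word array in this CPU's struct stack
 * (e.g. sp_irq = stk + offsetof(struct stack, irq[0])), and finally drops
 * back to SVC mode.  r14 is used as scratch, hence the clobber.
 */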

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        /*
         * clear __my_cpu_offset on boot CPU to avoid hang caused by
         * using percpu variable early, for example, lockdep will
         * access percpu variable inside lock_release
         */
        set_my_cpu_offset(0);

        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
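
/*
 * Worked example (editorial): if the boot CPU's MPIDR has Aff0 == 2 on a
 * four-CPU system, the loop above swaps entries 0 and 2, yielding the
 * logical-to-physical map {2, 1, 0, 3} so that the booting core is always
 * logical CPU 0.
 */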

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRS and filter out bits that do
         * not contribute to affinity levels, ie they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the MSB bit and LSB bits position
                 * to determine how many bits are required
                 * to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and by shifting
         * them in order to compress the 24 bits values space to a
         * compressed set of values. This is equivalent to hashing
         * the MPIDR through shifting and ORing. It is a collision free
         * hash though not minimal since some levels might contain a number
         * of CPUs that is not an exact power of 2 and their bit
         * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                                mpidr_hash.shift_aff[0],
                                mpidr_hash.shift_aff[1],
                                mpidr_hash.shift_aff[2],
                                mpidr_hash.mask,
                                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
#endif
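
/*
 * Worked example (editorial): with four CPUs whose MPIDRs are 0x0-0x3, the
 * XOR scan gives mask = 0x3, so at affinity level 0 fs[0] = 0 and
 * bits[0] = 2, while levels 1 and 2 contribute nothing.  shift_aff[0]
 * becomes 0 and mpidr_hash.bits becomes 2, i.e. the hash degenerates to the
 * identity on Aff0 with four buckets and no collisions.
 */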

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
                       read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
                proc_arch[cpu_architecture()], get_cr());

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
        init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
        erratum_a15_798181_init();

        elf_hwcap_fixup();

        cacheid_init();
        cpu_init();
}
void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
        u64 aligned_start;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is rounded down, start is rounded up.
         */
        aligned_start = PAGE_ALIGN(start);
        if (aligned_start > start + size)
                size = 0;
        else
                size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
                        (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
                        (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        if (aligned_start < PHYS_OFFSET) {
                if (aligned_start + size <= PHYS_OFFSET) {
                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                                aligned_start, aligned_start + size);
                        return -EINVAL;
                }

                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                        aligned_start, (u64)PHYS_OFFSET);

                size -= PHYS_OFFSET - aligned_start;
                aligned_start = PHYS_OFFSET;
        }

        start = aligned_start;
        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Reject regions whose size ended up zero after
         * page alignment.
         */
        if (size == 0)
                return -EINVAL;

        memblock_add(start, size);
        return 0;
}
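
/*
 * Worked example (editorial, assuming 4 KiB pages and PHYS_OFFSET at or
 * below the range): arm_add_memory(0x80000100, 0x100000) rounds the start
 * up to 0x80001000, shrinks the size by the 0xf00 bytes skipped, and then
 * masks the size down to a whole number of pages, registering
 * 0x80001000-0x800fffff (0xff000 bytes) with memblock.
 */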

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        u64 size;
        u64 start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
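
/*
 * Usage example (editorial): booting with "mem=512M@0x20000000" makes
 * memparse() return size = 0x20000000 and, because the next character is
 * '@', start = 0x20000000, so all auto-detected RAM is dropped and a single
 * 512 MiB bank at 0x20000000 is handed to arm_add_memory().
 */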

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start = virt_to_phys(_text);
        kernel_code.end = virt_to_phys(_etext - 1);
        kernel_data.start = virt_to_phys(_sdata);
        kernel_data.end = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = memblock_virt_alloc(sizeof(*res), 0);
                res->name = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines = 30,
        .orig_video_cols = 80,
        .orig_video_mode = 0,
        .orig_video_ega_bx = 0,
        .orig_video_isVGA = 1,
        .orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * customizes platform devices, or adds new ones
         * On DT based machines, we fall back to populating the
         * machine from the device tree, if no callback is provided,
         * otherwise we would always need an init_machine callback.
         */
        of_iommu_init();
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        struct device_node *root;
        int ret;

        if (machine_desc->init_late)
                machine_desc->init_late();

        root = of_find_node_by_path("/");
        if (root) {
                ret = of_property_read_string(root, "serial-number",
                                              &system_serial);
                if (ret)
                        system_serial = NULL;
        }

        if (!system_serial)
                system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
                                          system_serial_high,
                                          system_serial_low);

        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = memblock_reserve(crash_base, crash_size);
        if (ret < 0) {
                pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
                        (unsigned long)crash_base);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
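
/*
 * Usage example (editorial): booting with "crashkernel=64M@0x30000000"
 * makes parse_crashkernel() return crash_size = 64 MiB and
 * crash_base = 0x30000000; that range is then reserved in memblock and
 * published via crashk_res for a later kexec crash-kernel load.
 */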

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;
        dump_stack_set_arch_desc("%s", mdesc->name);

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

#ifdef CONFIG_MMU
        early_paging_init(mdesc);
#endif
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
        xen_early_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}
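
/*
 * Editorial note: setup_arch() first tries to interpret __atags_pointer as
 * a flattened device tree (setup_machine_fdt); only if that fails does it
 * fall back to the legacy ATAGS path (setup_machine_tags), so DT and ATAGS
 * boots share this single entry point.
 */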


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor". Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                for (j = 0; hwcap2_str[j]; j++)
                        if (elf_hwcap2 & (1 << j))
                                seq_printf(m, "%s ", hwcap2_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %s\n", system_serial);

        return 0;
}
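
/*
 * Illustrative output (editorial, values made up): on a Cortex-A9 board the
 * loop above emits something like
 *
 *   processor       : 0
 *   model name      : ARMv7 Processor rev 1 (v7l)
 *   BogoMIPS        : 1325.67
 *   Features        : swp half thumb fastmult vfp edsp thumbee neon vfpv3 tls
 *   CPU implementer : 0x41
 *   CPU architecture: 7
 *   CPU variant     : 0x2
 *   CPU part        : 0xc09
 *   CPU revision    : 1
 */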

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = c_show
};