/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

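/*
 * Minimal per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes.  Three words per mode suffice because the vector
 * stubs only stash a few registers here before switching to the
 * SVC-mode stack (see the exception entry code in entry-armv.S).
 */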
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
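/*
 * ENDIANNESS reads the union through .l and so yields whichever char
 * sits at the lowest address: 'l' on a little-endian kernel, 'b' on a
 * big-endian one.  It is used below to build strings such as "armv7l"
 * for the utsname machine field and the ELF platform.
 */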

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
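/* The reserved ranges above are the legacy PC parallel port addresses
 * (0x3bc, 0x378 and 0x278); they are claimed in
 * request_standard_resources() only when the machine asks for them. */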

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

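/*
 * Work out the CPU architecture from the main ID register (MIDR).
 * Pre-CPUID cores are recognised by their fixed ID layouts; cores using
 * the "new" CPUID scheme (bits [19:16] == 0xf) are instead classified
 * from the VMSA/PMSA fields of ID_MMFR0, as the code below does.
 */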
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

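/*
 * A virtually-indexed I-cache can alias when the size of one way
 * (line_size * num_sets) exceeds PAGE_SIZE, since two virtual mappings
 * of the same physical page may then index different cache lines.  On
 * ARMv7 the way size is derived from CCSIDR after selecting the L1
 * instruction cache via CSSELR (the value 1 written below selects
 * level 1, instruction cache).
 */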
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

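/*
 * Decode the cache type register (CTR) into the global cacheid flags.
 * CTR bits [31:29] == 0b100 indicate the ARMv7 register layout, whose
 * L1Ip field (bits [15:14]) distinguishes ASID-tagged VIVT (0b01) and
 * PIPT (0b11) instruction caches from plain VIPT.
 */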
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

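/*
 * ID_ISAR0[27:24] ("Divide_instrs") encodes hardware integer divide
 * support: 1 means SDIV/UDIV in the Thumb instruction set only, 2 means
 * ARM and Thumb.  The switch below relies on fall-through from case 2
 * to case 1, since ARM-state divide implies Thumb-state divide.
 */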
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
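	/*
	 * Each "msr cpsr_c" below switches to the named exception mode
	 * with IRQs/FIQs masked, the following add/mov pair loads that
	 * mode's banked SP, and the final "msr" drops back to SVC mode.
	 */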
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

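/*
 * smp_setup_processor_id() makes the booting CPU logical CPU 0: its
 * affinity-level-0 id goes into slot 0 of the logical map, and the id
 * that would otherwise map there is swapped into the boot CPU's slot,
 * keeping the map a permutation of the possible ids.
 */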
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
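/*
 * Worked example (hypothetical two-cluster system): MPIDRs 0x000,
 * 0x001, 0x100 and 0x101 give mask = 0x101, with one significant bit
 * at affinity level 0 (fs[0] = 0) and one at level 1 (fs[1] = 0), so
 * shift_aff[1] = MPIDR_LEVEL_BITS + 0 - 1 = 7 and the four MPIDRs hash
 * to the dense indices 0..3.
 */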
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]", e.g. "mem=512M@0x80000000".
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have the parallel port ranges lp0, lp1
	 * or lp2, so only reserve them when the machine description asks.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customizes platform devices, or adds new ones.  On DT-based
	 * machines, if no init_machine callback is provided we fall back
	 * to populating the machine from the device tree; otherwise every
	 * machine would need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a dump
 * capture kernel when the primary kernel crashes.
 */
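/*
 * (Example, assuming the range is otherwise free: booting with
 * "crashkernel=64M@0x81000000" reserves 64MB at physical address
 * 0x81000000 for the capture kernel.)
 */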
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

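/*
 * Main architecture-specific boot-time setup: pick the machine
 * description (from a flattened device tree if one was passed via
 * __atags_pointer, otherwise from ATAGS), then bring up memory,
 * paging, SMP operations and the machine's early hooks in order.
 */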
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

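/*
 * hwcap_str must stay in HWCAP_* bit order: c_show() below indexes it
 * by bit position when printing the "Features" line of /proc/cpuinfo.
 */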
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

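/*
 * Trivial seq_file iterator: c_start() hands out a single dummy token,
 * so c_show() emits everything in one pass and c_next() immediately
 * terminates the sequence.
 */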
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};