/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
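/*
 * Casting the union's 'l' member to char yields its least significant
 * byte: 'l' on a little-endian CPU, 'b' on a big-endian one. This
 * character is appended to the utsname machine and ELF platform
 * strings in setup_processor().
 */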

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
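		/*
		 * line_size * num_sets is the size of one cache way; if it
		 * exceeds PAGE_SIZE, virtual index bits above the page
		 * offset select the set, so two mappings of the same page
		 * can land in different lines, i.e. the I-cache can alias.
		 */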
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

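	/*
	 * Bit 0 of a Thumb symbol address is the interworking flag, so it
	 * is masked off to obtain the address of the code to rewrite. The
	 * empty asm below makes fn_addr opaque to the compiler, so the
	 * word stores cannot be optimised away against the original symbol.
	 */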
	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

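	/* ID_ISAR0[27:24] (Divide): 1 = SDIV/UDIV in Thumb, 2 = in ARM too */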
	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

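	/*
	 * Make the booting CPU logical CPU 0: its Aff0 value takes slot 0,
	 * and whichever logical slot matched it is given 0 in exchange.
	 */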
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bit positions
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
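	/*
	 * Illustrative example: logical maps {0x0000, 0x0001, 0x0100,
	 * 0x0101} give mask = 0x0101 and bits = {1, 1, 0}; Aff0 bit 0
	 * stays at bit 0, Aff1 bit 8 is shifted down by 7 to bit 1, and
	 * the result is a collision-free 2-bit index.
	 */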
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
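	/*
	 * Illustrative example with 4K pages: start = 0x80000400 and
	 * size = 0x3000 yield aligned_start = 0x80001000 and size =
	 * 0x2400, which the page mask below rounds down to 0x2000.
	 */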
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
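/*
 * E.g. "mem=64M@0x30000000" (illustrative values) discards the
 * auto-detected RAM and registers a single 64MB bank at 0x30000000.
 */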
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

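	/*
	 * The basic syntax is crashkernel=size[@base]; e.g. the
	 * illustrative "crashkernel=64M" lets the code below choose a
	 * base, while "crashkernel=64M@0x30000000" requests a fixed one.
	 */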
	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
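/* create the /proc/cpu directory that entries such as cpu/alignment hook into */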
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

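/* string order must match the HWCAP_* bit positions in elf_hwcap */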
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

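/*
 * The iterator below yields exactly one element, so a single c_show()
 * call prints every online CPU.
 */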
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};