/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
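/*
 * Explanatory note (added): the union above is an endianness probe. On
 * a little-endian kernel the low byte of endian_test.l is c[0] ('l');
 * on a big-endian kernel it is c[3] ('b'), so ENDIANNESS evaluates to
 * 'l' or 'b' and is appended to the utsname machine and ELF platform
 * strings in setup_processor() below.
 */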

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
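/*
 * Worked example for the ARMv7 branch above (illustrative numbers): a
 * CCSIDR reporting 32-byte lines (LineSize field 1) and 256 sets spans
 * 32 * 256 = 8KB per way. With 4KB pages the set index then uses
 * virtual address bits above bit 11, so the same physical line can sit
 * at two different indexes, i.e. the I-cache can alias.
 */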

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}
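/*
 * Note on the empty asm statements above (an added interpretation, not
 * from the original comments): "+g" (fn_addr) makes the address opaque
 * to the optimizer, so the word stores cannot be elided or reordered
 * based on what the compiler knows about the function symbols. The two
 * writes replace each software-divide entry point with a hardware
 * sdiv/udiv plus a return, and flush_icache_range() then publishes the
 * patch to the instruction stream.
 */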

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
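/*
 * Added note: each cpuid_feature_extract*() call above reads a 4-bit
 * field of an ID register. Per the ARMv7 ARM, ID_ISAR0[27:24] is the
 * divide field: 1 means SDIV/UDIV exist in the Thumb encoding only,
 * 2 means they exist in the ARM encoding as well, hence ">= 2" for
 * HWCAP_IDIVA but ">= 1" for HWCAP_IDIVT.
 */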

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
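/*
 * Added note: the asm block above steps through IRQ, ABT, UND and FIQ
 * modes with interrupts masked, points each mode's banked sp at the
 * matching 3-word slot of this CPU's struct stack, and finally drops
 * back to SVC mode. Exception entry only needs these few words to
 * stash state before switching to the SVC stack, which is why the
 * per-mode stacks can be so small.
 */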

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compact set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision-free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
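/*
 * Worked example (illustrative): MPIDRs 0x000, 0x001, 0x100 and 0x101
 * toggle bit 0 at affinity level 0 and bit 8 at level 1, so
 * mask = 0x101 and bits = {1, 1, 0}. Then shift_aff[0] = 0 and
 * shift_aff[1] = MPIDR_LEVEL_BITS + 0 - 1 = 7, so the level 1 bit
 * lands next to the level 0 bit and the four CPUs hash to the dense
 * indices 0..3.
 */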
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}
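/*
 * Rounding example (illustrative): start = 0x60000800 and
 * size = 0x100000 become aligned_start = 0x60001000 and size = 0xff800,
 * and the final mask trims the size to 0xff000, so both edges of the
 * registered bank are page aligned and lie inside the original region.
 */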

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
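/*
 * Usage example (illustrative): "mem=64M@0x80000000" discards the
 * firmware-reported RAM layout on first use and registers a single
 * 64MB bank at 0x80000000 through arm_add_memory(). Additional mem=
 * options add further banks; only the first occurrence clears the
 * memblock map, courtesy of the static usermem flag.
 */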

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command line parameter. The memory reserved is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
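/*
 * Usage example (illustrative): "crashkernel=64M" lets the code above
 * pick a 128MB-aligned base below lowmem via memblock_find_in_range(),
 * while "crashkernel=64M@0x78000000" requests that exact base and the
 * reservation fails if the range is already occupied.
 */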

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
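/*
 * Added note: c_start() yields a single dummy element and c_next()
 * immediately terminates the walk, so seq_file invokes c_show()
 * exactly once per read of /proc/cpuinfo; c_show() itself iterates
 * over the online CPUs.
 */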