/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
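/*
 * The cast above truncates endian_test.l to its least significant byte,
 * which the union overlays on c[0] ('l') on a little-endian CPU and on
 * c[3] ('b') on a big-endian one. setup_processor() appends the result
 * to the CPU and platform names, giving e.g. "armv7l" vs "armv7b".
 */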

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

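/*
 * A virtually-indexed icache can alias when the bits that index a set
 * extend above the page offset, i.e. when one cache way (line size *
 * number of sets) is larger than PAGE_SIZE. On ARMv7 the way size is
 * computed from CCSIDR; on ARMv6 the cache type register flags an
 * aliasing icache directly (bit 11). PIPT caches never alias.
 */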
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

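/*
 * Decode the cache type register into the global cacheid flags. The top
 * three bits select the register format: 0b100 is the ARMv7 layout
 * (VIPT non-aliasing data cache, with the L1Ip field giving the icache
 * policy); any other value on a >= v6 core is treated as the ARMv6
 * layout; pre-v6 cores are assumed VIVT. An icache aliasing flag is
 * then ORed in via cpu_has_aliasing_icache().
 */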
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

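/*
 * If the CPU advertises hardware divide, overwrite the first two words
 * of the library fallbacks __aeabi_uidiv/__aeabi_idiv with a native
 * udiv/sdiv plus "bx lr", then flush the icache so the patched words
 * are fetched. The empty asm() below launders fn_addr so the compiler
 * cannot assume the function body is immutable and elide the stores.
 */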
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

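/*
 * Derive extra hwcaps from the CPUID feature registers (ARMv7+ only):
 * the ID_ISAR0 divide field (>= 1: Thumb divide, >= 2: ARM divide too),
 * the MMFR0 VMSA field (>= 5 means LPAE, and hence atomic ldrd/strd),
 * and the ID_ISAR5 nibbles advertising the v8 crypto/CRC instructions.
 */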
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

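/*
 * Register a physical memory bank with memblock. The region is clamped
 * to page boundaries (start rounded up, size rounded down), truncated
 * to the 32-bit physical address space when LPAE is not configured,
 * and clipped against PHYS_OFFSET before being added.
 */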
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
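/*
 * For example, "mem=64M@0xc0000000" discards the firmware-reported
 * layout and describes a single 64MB bank at 0xc0000000 (an arbitrary
 * example address); the option may be given more than once to add
 * further banks.
 */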

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

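/*
 * Publish the standard /proc/iomem and /proc/ioports entries: one
 * "System RAM" resource per memblock region (plus a "(boot alias)"
 * copy on systems with an idmap alias), with the kernel code/data
 * ranges nested inside, and the optional video RAM and lp0-lp2 ports.
 */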
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customizes platform devices, or adds new ones.
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

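/*
 * Main architecture-specific boot entry point, called from
 * start_kernel(): identify the CPU, pick the machine descriptor from
 * the FDT (or ATAGS as a fallback), initialise the memory layout and
 * page tables, register the standard resources, and prepare SMP and
 * hyp-mode handling.
 */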
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};