/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

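/*
 * Endianness probe: the (char) cast takes the least significant byte of .l,
 * which overlays c[0] ('l') on a little-endian kernel and c[3] ('b') on a
 * big-endian one (32-bit long).  The character is appended to the utsname
 * machine and ELF platform strings, e.g. "armv7l"/"v7l".
 */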
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

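/*
 * Legacy PC-style parallel port I/O ranges (the classic LPT base addresses);
 * only reserved when the machine descriptor sets the corresponding
 * reserve_lp* flag, see request_standard_resources() below.
 */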
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

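/*
 * A virtually-indexed I-cache can alias when the bits used to select a set
 * extend above the page offset, i.e. when the way size (line size * number
 * of sets) exceeds PAGE_SIZE; PIPT and sufficiently small VIPT instruction
 * caches are alias-free.
 */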
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

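/*
 * Cache Type Register bits [31:29] select the register layout: 0x4 means
 * the ARMv7 format decoded below, anything else is treated as the older
 * ARMv6 format.
 */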
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

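/*
 * Overwrite the head of the library __aeabi_{u}idiv helpers with a hardware
 * divide plus "bx lr" when the CPU advertises divide support.  Clearing
 * bit 0 of the function address strips the Thumb interworking bit; the
 * empty asm with a "+g" constraint apparently hides the pointer's
 * provenance from the compiler so the stores through it are not
 * misanalysed or optimised away.
 */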
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

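/*
 * ID_ISAR0[27:24] encodes divide support: 1 = SDIV/UDIV in Thumb state
 * only, 2 = in both ARM and Thumb state.  ID_MMFR0[3:0] >= 5 indicates the
 * LPAE memory model, which guarantees atomic 64-bit ldrd/strd.
 */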
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
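	/*
	 * Each msr cpsr_c below switches the core into one exception mode
	 * (IRQ, ABT, UND, FIQ) with interrupts masked, loads that mode's
	 * banked SP with the matching slot of this CPU's struct stack, and
	 * finally returns to SVC mode.
	 */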
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

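/*
 * Swap logical CPU 0 with the slot that identity-maps the boot CPU's MPIDR
 * affinity-0 value, so that the booting CPU is always logical CPU 0
 * regardless of which physical core the bootloader started us on.
 */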
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
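/*
 * Worked example (illustrative): for MPIDRs {0x0, 0x1, 0x100, 0x101} the
 * mask of toggling bits is 0x101, bits[] = {1, 1, 0} and fs[] = {0, 0, 0},
 * so shift_aff[] = {0, 7, 14} and the hash compresses the four values to
 * indices 0..3 with no collisions.
 */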
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

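/*
 * Round the region inward to whole pages: the start is rounded up and the
 * size rounded down, e.g. start=0x1800/size=0x2000 with 4K pages becomes
 * start=0x2000/size=0x1000.
 */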
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

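/*
 * e.g. "mem=64M@0x20000000"; if "@start" is omitted the region is assumed
 * to begin at PHYS_OFFSET.  The first "mem=" seen discards all RAM that was
 * discovered automatically, so the options must describe memory in full.
 */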
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
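/*
 * e.g. "crashkernel=64M" lets the kernel pick a 128MB-aligned base below
 * the idmap limit, while "crashkernel=64M@0x30000000" requests that exact
 * physical address and fails if the range is already in use.
 */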
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

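/*
 * Feature names, ordered by HWCAP_* bit position: c_show() prints entry j
 * whenever bit j of elf_hwcap is set, so the order here must match the
 * uapi hwcap bit definitions.
 */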
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};