1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/efi.h>
11#include <linux/export.h>
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/ioport.h>
15#include <linux/delay.h>
16#include <linux/utsname.h>
17#include <linux/initrd.h>
18#include <linux/console.h>
19#include <linux/bootmem.h>
20#include <linux/seq_file.h>
21#include <linux/screen_info.h>
22#include <linux/of_platform.h>
23#include <linux/init.h>
24#include <linux/kexec.h>
25#include <linux/of_fdt.h>
26#include <linux/cpu.h>
27#include <linux/interrupt.h>
28#include <linux/smp.h>
29#include <linux/proc_fs.h>
30#include <linux/memblock.h>
31#include <linux/bug.h>
32#include <linux/compiler.h>
33#include <linux/sort.h>
34#include <linux/psci.h>
35
36#include <asm/unified.h>
37#include <asm/cp15.h>
38#include <asm/cpu.h>
39#include <asm/cputype.h>
40#include <asm/efi.h>
41#include <asm/elf.h>
42#include <asm/early_ioremap.h>
43#include <asm/fixmap.h>
44#include <asm/procinfo.h>
45#include <asm/psci.h>
46#include <asm/sections.h>
47#include <asm/setup.h>
48#include <asm/smp_plat.h>
49#include <asm/mach-types.h>
50#include <asm/cacheflush.h>
51#include <asm/cachetype.h>
52#include <asm/tlbflush.h>
53#include <asm/xen/hypervisor.h>
54
55#include <asm/prom.h>
56#include <asm/mach/arch.h>
57#include <asm/mach/irq.h>
58#include <asm/mach/time.h>
59#include <asm/system_info.h>
60#include <asm/system_misc.h>
61#include <asm/traps.h>
62#include <asm/unwind.h>
63#include <asm/memblock.h>
64#include <asm/virt.h>
65
66#include "atags.h"
67
68
69#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70char fpe_type[8];
71
72static int __init fpe_setup(char *line)
73{
74 memcpy(fpe_type, line, 8);
75 return 1;
76}
77
78__setup("fpe=", fpe_setup);
79#endif
80
81extern void init_default_cache_policy(unsigned long);
82extern void paging_init(const struct machine_desc *desc);
83extern void early_mm_init(const struct machine_desc *);
84extern void adjust_lowmem_bounds(void);
85extern enum reboot_mode reboot_mode;
86extern void setup_dma_zone(const struct machine_desc *desc);
87
88unsigned int processor_id;
89EXPORT_SYMBOL(processor_id);
90unsigned int __machine_arch_type __read_mostly;
91EXPORT_SYMBOL(__machine_arch_type);
92unsigned int cacheid __read_mostly;
93EXPORT_SYMBOL(cacheid);
94
95unsigned int __atags_pointer __initdata;
96
97unsigned int system_rev;
98EXPORT_SYMBOL(system_rev);
99
100const char *system_serial;
101EXPORT_SYMBOL(system_serial);
102
103unsigned int system_serial_low;
104EXPORT_SYMBOL(system_serial_low);
105
106unsigned int system_serial_high;
107EXPORT_SYMBOL(system_serial_high);
108
109unsigned int elf_hwcap __read_mostly;
110EXPORT_SYMBOL(elf_hwcap);
111
112unsigned int elf_hwcap2 __read_mostly;
113EXPORT_SYMBOL(elf_hwcap2);
114
115
116#ifdef MULTI_CPU
117struct processor processor __ro_after_init;
118#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
119struct processor *cpu_vtable[NR_CPUS] = {
120 [0] = &processor,
121};
122#endif
123#endif
124#ifdef MULTI_TLB
125struct cpu_tlb_fns cpu_tlb __ro_after_init;
126#endif
127#ifdef MULTI_USER
128struct cpu_user_fns cpu_user __ro_after_init;
129#endif
130#ifdef MULTI_CACHE
131struct cpu_cache_fns cpu_cache __ro_after_init;
132#endif
133#ifdef CONFIG_OUTER_CACHE
134struct outer_cache_fns outer_cache __ro_after_init;
135EXPORT_SYMBOL(outer_cache);
136#endif
137
138/*
139 * Cached cpu_architecture() result for use by assembler code.
140 * C code should use the cpu_architecture() function instead of accessing this
141 * variable directly.
142 */
143int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
144
145struct stack {
146 u32 irq[3];
147 u32 abt[3];
148 u32 und[3];
149 u32 fiq[3];
150} ____cacheline_aligned;
151
152#ifndef CONFIG_CPU_V7M
153static struct stack stacks[NR_CPUS];
154#endif
155
156char elf_platform[ELF_PLATFORM_SIZE];
157EXPORT_SYMBOL(elf_platform);
158
159static const char *cpu_name;
160static const char *machine_name;
161static char __initdata cmd_line[COMMAND_LINE_SIZE];
162const struct machine_desc *machine_desc __initdata;
163
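/*
 * Byte-order probe: the low byte of .l aliases c[0] ('l') on a
 * little-endian CPU and c[3] ('b') on a big-endian one, so ENDIANNESS
 * below evaluates to 'l' or 'b' accordingly.
 */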
164static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
165#define ENDIANNESS ((char)endian_test.l)
166
167DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
168
169/*
170 * Standard memory resources
171 */
172static struct resource mem_res[] = {
173 {
174 .name = "Video RAM",
175 .start = 0,
176 .end = 0,
177 .flags = IORESOURCE_MEM
178 },
179 {
180 .name = "Kernel code",
181 .start = 0,
182 .end = 0,
183 .flags = IORESOURCE_SYSTEM_RAM
184 },
185 {
186 .name = "Kernel data",
187 .start = 0,
188 .end = 0,
189 .flags = IORESOURCE_SYSTEM_RAM
190 }
191};
192
193#define video_ram mem_res[0]
194#define kernel_code mem_res[1]
195#define kernel_data mem_res[2]
196
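/*
 * Legacy PC parallel-port (LPT) I/O ranges; these are only reserved
 * when the machine descriptor requests it (see
 * request_standard_resources() below).
 */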
197static struct resource io_res[] = {
198 {
199 .name = "reserved",
200 .start = 0x3bc,
201 .end = 0x3be,
202 .flags = IORESOURCE_IO | IORESOURCE_BUSY
203 },
204 {
205 .name = "reserved",
206 .start = 0x378,
207 .end = 0x37f,
208 .flags = IORESOURCE_IO | IORESOURCE_BUSY
209 },
210 {
211 .name = "reserved",
212 .start = 0x278,
213 .end = 0x27f,
214 .flags = IORESOURCE_IO | IORESOURCE_BUSY
215 }
216};
217
218#define lp0 io_res[0]
219#define lp1 io_res[1]
220#define lp2 io_res[2]
221
222static const char *proc_arch[] = {
223 "undefined/unknown",
224 "3",
225 "4",
226 "4T",
227 "5",
228 "5T",
229 "5TE",
230 "5TEJ",
231 "6TEJ",
232 "7",
233 "7M",
234 "?(12)",
235 "?(13)",
236 "?(14)",
237 "?(15)",
238 "?(16)",
239 "?(17)",
240};
241
242#ifdef CONFIG_CPU_V7M
243static int __get_cpu_architecture(void)
244{
245 return CPU_ARCH_ARMv7M;
246}
247#else
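/*
 * Decode the architecture version from the main ID register; CPUs using
 * the revised (0xF) CPUID scheme are classified as ARMv6 or ARMv7 based
 * on the VMSA/PMSA fields of ID_MMFR0.
 */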
248static int __get_cpu_architecture(void)
249{
250 int cpu_arch;
251
252 if ((read_cpuid_id() & 0x0008f000) == 0) {
253 cpu_arch = CPU_ARCH_UNKNOWN;
254 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
255 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
256 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
257 cpu_arch = (read_cpuid_id() >> 16) & 7;
258 if (cpu_arch)
259 cpu_arch += CPU_ARCH_ARMv3;
260 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
261 /* Revised CPUID format. Read the Memory Model Feature
262 * Register 0 and check for VMSAv7 or PMSAv7 */
263 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
264 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
265 (mmfr0 & 0x000000f0) >= 0x00000030)
266 cpu_arch = CPU_ARCH_ARMv7;
267 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
268 (mmfr0 & 0x000000f0) == 0x00000020)
269 cpu_arch = CPU_ARCH_ARMv6;
270 else
271 cpu_arch = CPU_ARCH_UNKNOWN;
272 } else
273 cpu_arch = CPU_ARCH_UNKNOWN;
274
275 return cpu_arch;
276}
277#endif
278
279int __pure cpu_architecture(void)
280{
281 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
282
283 return __cpu_architecture;
284}
285
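/*
 * Detect whether the L1 instruction cache can alias: on ARMv7 a single
 * cache way (line size * number of sets) larger than PAGE_SIZE means a
 * VIPT I-cache can alias; ARMv6 reports this directly in the cache type
 * register.
 */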
286static int cpu_has_aliasing_icache(unsigned int arch)
287{
288 int aliasing_icache;
289 unsigned int id_reg, num_sets, line_size;
290
291 /* PIPT caches never alias. */
292 if (icache_is_pipt())
293 return 0;
294
295 /* arch specifies the register format */
296 switch (arch) {
297 case CPU_ARCH_ARMv7:
298 set_csselr(CSSELR_ICACHE | CSSELR_L1);
299 isb();
300 id_reg = read_ccsidr();
301 line_size = 4 << ((id_reg & 0x7) + 2);
302 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
303 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
304 break;
305 case CPU_ARCH_ARMv6:
306 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
307 break;
308 default:
309 /* I-cache aliases will be handled by D-cache aliasing code */
310 aliasing_icache = 0;
311 }
312
313 return aliasing_icache;
314}
315
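/*
 * Populate the global cacheid flags (VIVT, VIPT aliasing/non-aliasing,
 * PIPT, ASID-tagged) from the cache type register.
 */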
316static void __init cacheid_init(void)
317{
318 unsigned int arch = cpu_architecture();
319
320 if (arch >= CPU_ARCH_ARMv6) {
321 unsigned int cachetype = read_cpuid_cachetype();
322
323 if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
324 cacheid = 0;
325 } else if ((cachetype & (7 << 29)) == 4 << 29) {
326 /* ARMv7 register format */
327 arch = CPU_ARCH_ARMv7;
328 cacheid = CACHEID_VIPT_NONALIASING;
329 switch (cachetype & (3 << 14)) {
330 case (1 << 14):
331 cacheid |= CACHEID_ASID_TAGGED;
332 break;
333 case (3 << 14):
334 cacheid |= CACHEID_PIPT;
335 break;
336 }
337 } else {
338 arch = CPU_ARCH_ARMv6;
339 if (cachetype & (1 << 23))
340 cacheid = CACHEID_VIPT_ALIASING;
341 else
342 cacheid = CACHEID_VIPT_NONALIASING;
343 }
344 if (cpu_has_aliasing_icache(arch))
345 cacheid |= CACHEID_VIPT_I_ALIASING;
346 } else {
347 cacheid = CACHEID_VIVT;
348 }
349
350 pr_info("CPU: %s data cache, %s instruction cache\n",
351 cache_is_vivt() ? "VIVT" :
352 cache_is_vipt_aliasing() ? "VIPT aliasing" :
353 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
354 cache_is_vivt() ? "VIVT" :
355 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
356 icache_is_vipt_aliasing() ? "VIPT aliasing" :
357 icache_is_pipt() ? "PIPT" :
358 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
359}
360
361/*
362 * These functions re-use the assembly code in head.S, which
363 * already provides the required functionality.
364 */
365extern struct proc_info_list *lookup_processor_type(unsigned int);
366
367void __init early_print(const char *str, ...)
368{
369 extern void printascii(const char *);
370 char buf[256];
371 va_list ap;
372
373 va_start(ap, str);
374 vsnprintf(buf, sizeof(buf), str, ap);
375 va_end(ap);
376
377#ifdef CONFIG_DEBUG_LL
378 printascii(buf);
379#endif
380 printk("%s", buf);
381}
382
383#ifdef CONFIG_ARM_PATCH_IDIV
384
385static inline u32 __attribute_const__ sdiv_instruction(void)
386{
387 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
388 /* "sdiv r0, r0, r1" */
389 u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
390 return __opcode_to_mem_thumb32(insn);
391 }
392
393 /* "sdiv r0, r0, r1" */
394 return __opcode_to_mem_arm(0xe710f110);
395}
396
397static inline u32 __attribute_const__ udiv_instruction(void)
398{
399 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
400 /* "udiv r0, r0, r1" */
401 u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
402 return __opcode_to_mem_thumb32(insn);
403 }
404
405 /* "udiv r0, r0, r1" */
406 return __opcode_to_mem_arm(0xe730f110);
407}
408
409static inline u32 __attribute_const__ bx_lr_instruction(void)
410{
411 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
412 /* "bx lr; nop" */
413 u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
414 return __opcode_to_mem_thumb32(insn);
415 }
416
417 /* "bx lr" */
418 return __opcode_to_mem_arm(0xe12fff1e);
419}
420
421static void __init patch_aeabi_idiv(void)
422{
423 extern void __aeabi_uidiv(void);
424 extern void __aeabi_idiv(void);
425 uintptr_t fn_addr;
426 unsigned int mask;
427
428 mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
429 if (!(elf_hwcap & mask))
430 return;
431
432 pr_info("CPU: div instructions available: patching division code\n");
433
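	/*
	 * Overwrite the first two instructions of __aeabi_uidiv and
	 * __aeabi_idiv with "udiv/sdiv r0, r0, r1; bx lr".  The Thumb bit
	 * is cleared from each address, and the empty asm keeps the
	 * compiler from assuming the function bodies are unmodified.
	 */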
434 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
435 asm ("" : "+g" (fn_addr));
436 ((u32 *)fn_addr)[0] = udiv_instruction();
437 ((u32 *)fn_addr)[1] = bx_lr_instruction();
438 flush_icache_range(fn_addr, fn_addr + 8);
439
440 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
441 asm ("" : "+g" (fn_addr));
442 ((u32 *)fn_addr)[0] = sdiv_instruction();
443 ((u32 *)fn_addr)[1] = bx_lr_instruction();
444 flush_icache_range(fn_addr, fn_addr + 8);
445}
446
447#else
448static inline void patch_aeabi_idiv(void) { }
449#endif
450
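/*
 * Derive additional hwcaps from the ARMv7+ CPUID feature registers:
 * hardware integer divide (ID_ISAR0), LPAE (ID_MMFR0) and the v8
 * Crypto/CRC32 extensions (ID_ISAR5).
 */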
451static void __init cpuid_init_hwcaps(void)
452{
453 int block;
454 u32 isar5;
455
456 if (cpu_architecture() < CPU_ARCH_ARMv7)
457 return;
458
459 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
460 if (block >= 2)
461 elf_hwcap |= HWCAP_IDIVA;
462 if (block >= 1)
463 elf_hwcap |= HWCAP_IDIVT;
464
465 /* LPAE implies atomic ldrd/strd instructions */
466 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
467 if (block >= 5)
468 elf_hwcap |= HWCAP_LPAE;
469
470 /* check for supported v8 Crypto instructions */
471 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
472
473 block = cpuid_feature_extract_field(isar5, 4);
474 if (block >= 2)
475 elf_hwcap2 |= HWCAP2_PMULL;
476 if (block >= 1)
477 elf_hwcap2 |= HWCAP2_AES;
478
479 block = cpuid_feature_extract_field(isar5, 8);
480 if (block >= 1)
481 elf_hwcap2 |= HWCAP2_SHA1;
482
483 block = cpuid_feature_extract_field(isar5, 12);
484 if (block >= 1)
485 elf_hwcap2 |= HWCAP2_SHA2;
486
487 block = cpuid_feature_extract_field(isar5, 16);
488 if (block >= 1)
489 elf_hwcap2 |= HWCAP2_CRC32;
490}
491
492static void __init elf_hwcap_fixup(void)
493{
494 unsigned id = read_cpuid_id();
495
496 /*
497 * HWCAP_TLS is available only on 1136 r1p0 and later,
498 * see also kuser_get_tls_init.
499 */
500 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
501 ((id >> 20) & 3) == 0) {
502 elf_hwcap &= ~HWCAP_TLS;
503 return;
504 }
505
506 /* Verify if CPUID scheme is implemented */
507 if ((id & 0x000f0000) != 0x000f0000)
508 return;
509
510 /*
511 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
512 * avoid advertising SWP; it may not be atomic with
513 * multiprocessing cores.
514 */
515 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
516 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
517 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
518 elf_hwcap &= ~HWCAP_SWP;
519}
520
521/*
522 * cpu_init - initialise one CPU.
523 *
524 * cpu_init sets up the per-CPU stacks.
525 */
526void notrace cpu_init(void)
527{
528#ifndef CONFIG_CPU_V7M
529 unsigned int cpu = smp_processor_id();
530 struct stack *stk = &stacks[cpu];
531
532 if (cpu >= NR_CPUS) {
533 pr_crit("CPU%u: bad primary CPU number\n", cpu);
534 BUG();
535 }
536
537 /*
538 * This only works on resume and secondary cores. For booting on the
539 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
540 */
541 set_my_cpu_offset(per_cpu_offset(cpu));
542
543 cpu_proc_init();
544
545 /*
546 * Define the placement constraint for the inline asm directive below.
547 * In Thumb-2, msr with an immediate value is not allowed.
548 */
549#ifdef CONFIG_THUMB2_KERNEL
550#define PLC "r"
551#else
552#define PLC "I"
553#endif
554
555 /*
556 * setup stacks for re-entrant exception handlers
557 */
558 __asm__ (
559 "msr cpsr_c, %1\n\t"
560 "add r14, %0, %2\n\t"
561 "mov sp, r14\n\t"
562 "msr cpsr_c, %3\n\t"
563 "add r14, %0, %4\n\t"
564 "mov sp, r14\n\t"
565 "msr cpsr_c, %5\n\t"
566 "add r14, %0, %6\n\t"
567 "mov sp, r14\n\t"
568 "msr cpsr_c, %7\n\t"
569 "add r14, %0, %8\n\t"
570 "mov sp, r14\n\t"
571 "msr cpsr_c, %9"
572 :
573 : "r" (stk),
574 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
575 "I" (offsetof(struct stack, irq[0])),
576 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
577 "I" (offsetof(struct stack, abt[0])),
578 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
579 "I" (offsetof(struct stack, und[0])),
580 PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
581 "I" (offsetof(struct stack, fiq[0])),
582 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
583 : "r14");
584#endif
585}
586
587u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
588
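/*
 * Make logical CPU 0 refer to the CPU we are booting on: the boot CPU's
 * MPIDR affinity-0 value is swapped into cpu_logical_map(0), and every
 * other logical CPU keeps an identity mapping.
 */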
589void __init smp_setup_processor_id(void)
590{
591 int i;
592 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
593 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
594
595 cpu_logical_map(0) = cpu;
596 for (i = 1; i < nr_cpu_ids; ++i)
597 cpu_logical_map(i) = i == cpu ? 0 : i;
598
599 /*
600 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
601 * using per-cpu variables early; for example, lockdep will
602 * access a per-cpu variable inside lock_release.
603 */
604 set_my_cpu_offset(0);
605
606 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
607}
608
609struct mpidr_hash mpidr_hash;
610#ifdef CONFIG_SMP
611/**
612 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
613 * level in order to build a linear index from an
614 * MPIDR value. The resulting algorithm is a collision-free
615 * hash carried out through shifting and ORing.
616 */
617static void __init smp_build_mpidr_hash(void)
618{
619 u32 i, affinity;
620 u32 fs[3], bits[3], ls, mask = 0;
621 /*
622 * Pre-scan the list of MPIDRs and filter out bits that do
623 * not contribute to affinity levels, i.e. they never toggle.
624 */
625 for_each_possible_cpu(i)
626 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
627 pr_debug("mask of set bits 0x%x\n", mask);
628 /*
629 * Find and stash the last and first bit set at all affinity levels to
630 * check how many bits are required to represent them.
631 */
632 for (i = 0; i < 3; i++) {
633 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
634 /*
635 * Find the positions of the most- and least-significant set bits
636 * to determine how many bits are required
637 * to express the affinity level.
638 */
639 ls = fls(affinity);
640 fs[i] = affinity ? ffs(affinity) - 1 : 0;
641 bits[i] = ls - fs[i];
642 }
643 /*
644 * An index can be created from the MPIDR by isolating the
645 * significant bits at each affinity level and by shifting
646 * them in order to compress the 24-bit value space to a
647 * smaller set of values. This is equivalent to hashing
648 * the MPIDR through shifting and ORing. It is a collision-free
649 * hash, though not minimal, since some levels might contain a number
650 * of CPUs that is not an exact power of 2 and their bit
651 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
652 */
653 mpidr_hash.shift_aff[0] = fs[0];
654 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
655 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
656 (bits[1] + bits[0]);
657 mpidr_hash.mask = mask;
658 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
659 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
660 mpidr_hash.shift_aff[0],
661 mpidr_hash.shift_aff[1],
662 mpidr_hash.shift_aff[2],
663 mpidr_hash.mask,
664 mpidr_hash.bits);
665 /*
666 * 4x is an arbitrary value used to warn on a hash table much bigger
667 * than expected on most systems.
668 */
669 if (mpidr_hash_size() > 4 * num_possible_cpus())
670 pr_warn("Large number of MPIDR hash buckets detected\n");
671 sync_cache_w(&mpidr_hash);
672}
673#endif
674
675/*
676 * Locate the processor in the list of supported processor types. The linker
677 * builds this table for us from the entries in arch/arm/mm/proc-*.S
678 */
679struct proc_info_list *lookup_processor(u32 midr)
680{
681 struct proc_info_list *list = lookup_processor_type(midr);
682
683 if (!list) {
684 pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
685 smp_processor_id(), midr);
686 while (1)
687 /* can't use cpu_relax() here as it may require MMU setup */;
688 }
689
690 return list;
691}
692
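/*
 * Identify the boot CPU from its MIDR, install the per-CPU-type
 * function tables (proc/tlb/user/cache), and initialise hwcaps, cache
 * geometry and the boot CPU's exception stacks.
 */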
693static void __init setup_processor(void)
694{
695 unsigned int midr = read_cpuid_id();
696 struct proc_info_list *list = lookup_processor(midr);
697
698 cpu_name = list->cpu_name;
699 __cpu_architecture = __get_cpu_architecture();
700
701 init_proc_vtable(list->proc);
702#ifdef MULTI_TLB
703 cpu_tlb = *list->tlb;
704#endif
705#ifdef MULTI_USER
706 cpu_user = *list->user;
707#endif
708#ifdef MULTI_CACHE
709 cpu_cache = *list->cache;
710#endif
711
712 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
713 list->cpu_name, midr, midr & 15,
714 proc_arch[cpu_architecture()], get_cr());
715
716 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
717 list->arch_name, ENDIANNESS);
718 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
719 list->elf_name, ENDIANNESS);
720 elf_hwcap = list->elf_hwcap;
721
722 cpuid_init_hwcaps();
723 patch_aeabi_idiv();
724
725#ifndef CONFIG_ARM_THUMB
726 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
727#endif
728#ifdef CONFIG_MMU
729 init_default_cache_policy(list->__cpu_mm_mmu_flags);
730#endif
731 erratum_a15_798181_init();
732
733 elf_hwcap_fixup();
734
735 cacheid_init();
736 cpu_init();
737}
738
739void __init dump_machine_table(void)
740{
741 const struct machine_desc *p;
742
743 early_print("Available machine support:\n\nID (hex)\tNAME\n");
744 for_each_machine_desc(p)
745 early_print("%08x\t%s\n", p->nr, p->name);
746
747 early_print("\nPlease check your kernel config and/or bootloader.\n");
748
749 while (true)
750 /* can't use cpu_relax() here as it may require MMU setup */;
751}
752
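/*
 * Register a memory bank with memblock: page-align it (start rounded
 * up, size rounded down), clip it to the 32-bit physical address space
 * when phys_addr_t is 32-bit, and drop anything below PHYS_OFFSET.
 */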
753int __init arm_add_memory(u64 start, u64 size)
754{
755 u64 aligned_start;
756
757 /*
758 * Ensure that start/size are aligned to a page boundary.
759 * Size is rounded down, start is rounded up.
760 */
761 aligned_start = PAGE_ALIGN(start);
762 if (aligned_start > start + size)
763 size = 0;
764 else
765 size -= aligned_start - start;
766
767#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
768 if (aligned_start > ULONG_MAX) {
769 pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
770 (long long)start);
771 return -EINVAL;
772 }
773
774 if (aligned_start + size > ULONG_MAX) {
775 pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
776 (long long)start);
777 /*
778 * To ensure bank->start + bank->size is representable in
779 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
780 * This means we lose a page after masking.
781 */
782 size = ULONG_MAX - aligned_start;
783 }
784#endif
785
786 if (aligned_start < PHYS_OFFSET) {
787 if (aligned_start + size <= PHYS_OFFSET) {
788 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
789 aligned_start, aligned_start + size);
790 return -EINVAL;
791 }
792
793 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
794 aligned_start, (u64)PHYS_OFFSET);
795
796 size -= PHYS_OFFSET - aligned_start;
797 aligned_start = PHYS_OFFSET;
798 }
799
800 start = aligned_start;
801 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
802
803 /*
804 * Check whether this memory region still has a non-zero size
805 * after alignment and clipping.
806 */
807 if (size == 0)
808 return -EINVAL;
809
810 memblock_add(start, size);
811 return 0;
812}
813
814/*
815 * Pick out the memory size. We look for mem=size@start,
816 * where start and size are "size[KkMm]"
817 */
818
819static int __init early_mem(char *p)
820{
821 static int usermem __initdata = 0;
822 u64 size;
823 u64 start;
824 char *endp;
825
826 /*
827 * If the user specifies memory size, we
828 * blow away any automatically generated
829 * size.
830 */
831 if (usermem == 0) {
832 usermem = 1;
833 memblock_remove(memblock_start_of_DRAM(),
834 memblock_end_of_DRAM() - memblock_start_of_DRAM());
835 }
836
837 start = PHYS_OFFSET;
838 size = memparse(p, &endp);
839 if (*endp == '@')
840 start = memparse(endp + 1, NULL);
841
842 arm_add_memory(start, size);
843
844 return 0;
845}
846early_param("mem", early_mem);
847
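/*
 * Publish the kernel text/data, System RAM (plus its boot-time idmap
 * alias, if any) and the optional video RAM and parallel-port regions
 * in the resource tree, so they show up in /proc/iomem and
 * /proc/ioports.
 */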
848static void __init request_standard_resources(const struct machine_desc *mdesc)
849{
850 struct memblock_region *region;
851 struct resource *res;
852
853 kernel_code.start = virt_to_phys(_text);
854 kernel_code.end = virt_to_phys(__init_begin - 1);
855 kernel_data.start = virt_to_phys(_sdata);
856 kernel_data.end = virt_to_phys(_end - 1);
857
858 for_each_memblock(memory, region) {
859 phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
860 phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
861 unsigned long boot_alias_start;
862
863 /*
864 * Some systems have a special memory alias which is only
865 * used for booting. We need to advertise this region to
866 * kexec-tools so they know where bootable RAM is located.
867 */
868 boot_alias_start = phys_to_idmap(start);
869 if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
870 res = memblock_virt_alloc(sizeof(*res), 0);
871 res->name = "System RAM (boot alias)";
872 res->start = boot_alias_start;
873 res->end = phys_to_idmap(end);
874 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
875 request_resource(&iomem_resource, res);
876 }
877
878 res = memblock_virt_alloc(sizeof(*res), 0);
879 res->name = "System RAM";
880 res->start = start;
881 res->end = end;
882 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
883
884 request_resource(&iomem_resource, res);
885
886 if (kernel_code.start >= res->start &&
887 kernel_code.end <= res->end)
888 request_resource(res, &kernel_code);
889 if (kernel_data.start >= res->start &&
890 kernel_data.end <= res->end)
891 request_resource(res, &kernel_data);
892 }
893
894 if (mdesc->video_start) {
895 video_ram.start = mdesc->video_start;
896 video_ram.end = mdesc->video_end;
897 request_resource(&iomem_resource, &video_ram);
898 }
899
900 /*
901 * Some machines can never have lp0, lp1 or lp2, so only
902 * reserve them when the machine descriptor asks for it
903 */
904 if (mdesc->reserve_lp0)
905 request_resource(&ioport_resource, &lp0);
906 if (mdesc->reserve_lp1)
907 request_resource(&ioport_resource, &lp1);
908 if (mdesc->reserve_lp2)
909 request_resource(&ioport_resource, &lp2);
910}
911
912#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
913 defined(CONFIG_EFI)
914struct screen_info screen_info = {
915 .orig_video_lines = 30,
916 .orig_video_cols = 80,
917 .orig_video_mode = 0,
918 .orig_video_ega_bx = 0,
919 .orig_video_isVGA = 1,
920 .orig_video_points = 8
921};
922#endif
923
924static int __init customize_machine(void)
925{
926 /*
927 * Customize platform devices, or add new ones.
928 * On DT-based machines we fall back to populating the machine
929 * from the device tree if no callback is provided; otherwise
930 * we would always need an init_machine callback.
931 */
932 if (machine_desc->init_machine)
933 machine_desc->init_machine();
934
935 return 0;
936}
937arch_initcall(customize_machine);
938
939static int __init init_machine_late(void)
940{
941 struct device_node *root;
942 int ret;
943
944 if (machine_desc->init_late)
945 machine_desc->init_late();
946
947 root = of_find_node_by_path("/");
948 if (root) {
949 ret = of_property_read_string(root, "serial-number",
950 &system_serial);
951 if (ret)
952 system_serial = NULL;
953 }
954
955 if (!system_serial)
956 system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
957 system_serial_high,
958 system_serial_low);
959
960 return 0;
961}
962late_initcall(init_machine_late);
963
964#ifdef CONFIG_KEXEC
965/*
966 * The crash region must be aligned to 128MB to avoid
967 * zImage relocating below the reserved region.
968 */
969#define CRASH_ALIGN (128 << 20)
970
971static inline unsigned long long get_total_mem(void)
972{
973 unsigned long total;
974
975 total = max_low_pfn - min_low_pfn;
976 return total << PAGE_SHIFT;
977}
978
979/**
980 * reserve_crashkernel() - reserves memory area for the crash kernel
981 *
982 * This function reserves the memory area given in the "crashkernel=" kernel
983 * command line parameter. The reserved memory is used by a dump capture kernel
984 * when the primary kernel crashes.
985 */
986static void __init reserve_crashkernel(void)
987{
988 unsigned long long crash_size, crash_base;
989 unsigned long long total_mem;
990 int ret;
991
992 total_mem = get_total_mem();
993 ret = parse_crashkernel(boot_command_line, total_mem,
994 &crash_size, &crash_base);
995 if (ret)
996 return;
997
998 if (crash_base <= 0) {
999 unsigned long long crash_max = idmap_to_phys((u32)~0);
1000 unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1001 if (crash_max > lowmem_max)
1002 crash_max = lowmem_max;
1003 crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
1004 crash_size, CRASH_ALIGN);
1005 if (!crash_base) {
1006 pr_err("crashkernel reservation failed - No suitable area found.\n");
1007 return;
1008 }
1009 } else {
1010 unsigned long long start;
1011
1012 start = memblock_find_in_range(crash_base,
1013 crash_base + crash_size,
1014 crash_size, SECTION_SIZE);
1015 if (start != crash_base) {
1016 pr_err("crashkernel reservation failed - memory is in use.\n");
1017 return;
1018 }
1019 }
1020
1021 ret = memblock_reserve(crash_base, crash_size);
1022 if (ret < 0) {
1023 pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
1024 (unsigned long)crash_base);
1025 return;
1026 }
1027
1028 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1029 (unsigned long)(crash_size >> 20),
1030 (unsigned long)(crash_base >> 20),
1031 (unsigned long)(total_mem >> 20));
1032
1033 /* The crashk resource must always be located in normal mem */
1034 crashk_res.start = crash_base;
1035 crashk_res.end = crash_base + crash_size - 1;
1036 insert_resource(&iomem_resource, &crashk_res);
1037
1038 if (arm_has_idmap_alias()) {
1039 /*
1040 * If we have a special RAM alias for use at boot, we
1041 * need to advertise to kexec tools where the alias is.
1042 */
1043 static struct resource crashk_boot_res = {
1044 .name = "Crash kernel (boot alias)",
1045 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1046 };
1047
1048 crashk_boot_res.start = phys_to_idmap(crash_base);
1049 crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1050 insert_resource(&iomem_resource, &crashk_boot_res);
1051 }
1052}
1053#else
1054static inline void reserve_crashkernel(void) {}
1055#endif /* CONFIG_KEXEC */
1056
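/*
 * Report whether the CPUs entered the kernel in HYP mode (so the
 * virtualization extensions are usable), in SVC mode, or in a mix of
 * the two, which usually indicates broken firmware or a broken bootloader.
 */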
1057void __init hyp_mode_check(void)
1058{
1059#ifdef CONFIG_ARM_VIRT_EXT
1060 sync_boot_mode();
1061
1062 if (is_hyp_mode_available()) {
1063 pr_info("CPU: All CPU(s) started in HYP mode.\n");
1064 pr_info("CPU: Virtualization extensions available.\n");
1065 } else if (is_hyp_mode_mismatched()) {
1066 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1067 __boot_cpu_mode & MODE_MASK);
1068 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1069 } else
1070 pr_info("CPU: All CPU(s) started in SVC mode.\n");
1071#endif
1072}
1073
1074void __init setup_arch(char **cmdline_p)
1075{
1076 const struct machine_desc *mdesc;
1077
1078 setup_processor();
1079 mdesc = setup_machine_fdt(__atags_pointer);
1080 if (!mdesc)
1081 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1082 if (!mdesc) {
1083 early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
1084 early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
1085 __atags_pointer);
1086 if (__atags_pointer)
1087 early_print(" r2[]=%*ph\n", 16,
1088 phys_to_virt(__atags_pointer));
1089 dump_machine_table();
1090 }
1091
1092 machine_desc = mdesc;
1093 machine_name = mdesc->name;
1094 dump_stack_set_arch_desc("%s", mdesc->name);
1095
1096 if (mdesc->reboot_mode != REBOOT_HARD)
1097 reboot_mode = mdesc->reboot_mode;
1098
1099 init_mm.start_code = (unsigned long) _text;
1100 init_mm.end_code = (unsigned long) _etext;
1101 init_mm.end_data = (unsigned long) _edata;
1102 init_mm.brk = (unsigned long) _end;
1103
1104 /* populate cmd_line too for later use, preserving boot_command_line */
1105 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1106 *cmdline_p = cmd_line;
1107
1108 early_fixmap_init();
1109 early_ioremap_init();
1110
1111 parse_early_param();
1112
1113#ifdef CONFIG_MMU
1114 early_mm_init(mdesc);
1115#endif
1116 setup_dma_zone(mdesc);
1117 xen_early_init();
1118 efi_init();
1119 /*
1120 * Make sure the calculation for lowmem/highmem is set appropriately
1121 * before reserving/allocating any memory
1122 */
1123 adjust_lowmem_bounds();
1124 arm_memblock_init(mdesc);
1125 /* Memory may have been removed so recalculate the bounds. */
1126 adjust_lowmem_bounds();
1127
1128 early_ioremap_reset();
1129
1130 paging_init(mdesc);
1131 request_standard_resources(mdesc);
1132
1133 if (mdesc->restart)
1134 arm_pm_restart = mdesc->restart;
1135
1136 unflatten_device_tree();
1137
1138 arm_dt_init_cpu_maps();
1139 psci_dt_init();
1140#ifdef CONFIG_SMP
1141 if (is_smp()) {
1142 if (!mdesc->smp_init || !mdesc->smp_init()) {
1143 if (psci_smp_available())
1144 smp_set_ops(&psci_smp_ops);
1145 else if (mdesc->smp)
1146 smp_set_ops(mdesc->smp);
1147 }
1148 smp_init_cpus();
1149 smp_build_mpidr_hash();
1150 }
1151#endif
1152
1153 if (!is_smp())
1154 hyp_mode_check();
1155
1156 reserve_crashkernel();
1157
1158#ifdef CONFIG_MULTI_IRQ_HANDLER
1159 handle_arch_irq = mdesc->handle_irq;
1160#endif
1161
1162#ifdef CONFIG_VT
1163#if defined(CONFIG_VGA_CONSOLE)
1164 conswitchp = &vga_con;
1165#elif defined(CONFIG_DUMMY_CONSOLE)
1166 conswitchp = &dummy_con;
1167#endif
1168#endif
1169
1170 if (mdesc->init_early)
1171 mdesc->init_early();
1172}
1173
1174
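/*
 * Register every possible CPU with the sysfs CPU subsystem
 * (/sys/devices/system/cpu), marking it hotpluggable if the platform
 * supports that.
 */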
1175static int __init topology_init(void)
1176{
1177 int cpu;
1178
1179 for_each_possible_cpu(cpu) {
1180 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1181 cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1182 register_cpu(&cpuinfo->cpu, cpu);
1183 }
1184
1185 return 0;
1186}
1187subsys_initcall(topology_init);
1188
1189#ifdef CONFIG_HAVE_PROC_CPU
1190static int __init proc_cpu_init(void)
1191{
1192 struct proc_dir_entry *res;
1193
1194 res = proc_mkdir("cpu", NULL);
1195 if (!res)
1196 return -ENOMEM;
1197 return 0;
1198}
1199fs_initcall(proc_cpu_init);
1200#endif
1201
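/*
 * Feature names shown in /proc/cpuinfo; the array index corresponds to
 * the HWCAP bit number (hwcap2_str below does the same for HWCAP2).
 */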
1202static const char *hwcap_str[] = {
1203 "swp",
1204 "half",
1205 "thumb",
1206 "26bit",
1207 "fastmult",
1208 "fpa",
1209 "vfp",
1210 "edsp",
1211 "java",
1212 "iwmmxt",
1213 "crunch",
1214 "thumbee",
1215 "neon",
1216 "vfpv3",
1217 "vfpv3d16",
1218 "tls",
1219 "vfpv4",
1220 "idiva",
1221 "idivt",
1222 "vfpd32",
1223 "lpae",
1224 "evtstrm",
1225 NULL
1226};
1227
1228static const char *hwcap2_str[] = {
1229 "aes",
1230 "pmull",
1231 "sha1",
1232 "sha2",
1233 "crc32",
1234 NULL
1235};
1236
1237static int c_show(struct seq_file *m, void *v)
1238{
1239 int i, j;
1240 u32 cpuid;
1241
1242 for_each_online_cpu(i) {
1243 /*
1244 * glibc reads /proc/cpuinfo to determine the number of
1245 * online processors, looking for lines beginning with
1246 * "processor". Give glibc what it expects.
1247 */
1248 seq_printf(m, "processor\t: %d\n", i);
1249 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1250 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1251 cpu_name, cpuid & 15, elf_platform);
1252
1253#if defined(CONFIG_SMP)
1254 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1255 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1256 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1257#else
1258 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1259 loops_per_jiffy / (500000/HZ),
1260 (loops_per_jiffy / (5000/HZ)) % 100);
1261#endif
1262 /* dump out the processor features */
1263 seq_puts(m, "Features\t: ");
1264
1265 for (j = 0; hwcap_str[j]; j++)
1266 if (elf_hwcap & (1 << j))
1267 seq_printf(m, "%s ", hwcap_str[j]);
1268
1269 for (j = 0; hwcap2_str[j]; j++)
1270 if (elf_hwcap2 & (1 << j))
1271 seq_printf(m, "%s ", hwcap2_str[j]);
1272
1273 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1274 seq_printf(m, "CPU architecture: %s\n",
1275 proc_arch[cpu_architecture()]);
1276
1277 if ((cpuid & 0x0008f000) == 0x00000000) {
1278 /* pre-ARM7 */
1279 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1280 } else {
1281 if ((cpuid & 0x0008f000) == 0x00007000) {
1282 /* ARM7 */
1283 seq_printf(m, "CPU variant\t: 0x%02x\n",
1284 (cpuid >> 16) & 127);
1285 } else {
1286 /* post-ARM7 */
1287 seq_printf(m, "CPU variant\t: 0x%x\n",
1288 (cpuid >> 20) & 15);
1289 }
1290 seq_printf(m, "CPU part\t: 0x%03x\n",
1291 (cpuid >> 4) & 0xfff);
1292 }
1293 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1294 }
1295
1296 seq_printf(m, "Hardware\t: %s\n", machine_name);
1297 seq_printf(m, "Revision\t: %04x\n", system_rev);
1298 seq_printf(m, "Serial\t\t: %s\n", system_serial);
1299
1300 return 0;
1301}
1302
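/*
 * seq_file iterator for /proc/cpuinfo: there is a single "record"
 * (c_show() walks all online CPUs itself), so iteration stops after the
 * first position.
 */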
1303static void *c_start(struct seq_file *m, loff_t *pos)
1304{
1305 return *pos < 1 ? (void *)1 : NULL;
1306}
1307
1308static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1309{
1310 ++*pos;
1311 return NULL;
1312}
1313
1314static void c_stop(struct seq_file *m, void *v)
1315{
1316}
1317
1318const struct seq_operations cpuinfo_op = {
1319 .start = c_start,
1320 .next = c_next,
1321 .stop = c_stop,
1322 .show = c_show
1323};