]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/arm64/kernel/setup.c
ARM64 / ACPI: If we chose to boot from acpi then disable FDT
[mirror_ubuntu-bionic-kernel.git] / arch / arm64 / kernel / setup.c
CommitLineData
9703d9d7
CM
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
37655163 20#include <linux/acpi.h>
9703d9d7
CM
21#include <linux/export.h>
22#include <linux/kernel.h>
23#include <linux/stddef.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/utsname.h>
27#include <linux/initrd.h>
28#include <linux/console.h>
a41dc0e8 29#include <linux/cache.h>
9703d9d7
CM
30#include <linux/bootmem.h>
31#include <linux/seq_file.h>
32#include <linux/screen_info.h>
33#include <linux/init.h>
34#include <linux/kexec.h>
35#include <linux/crash_dump.h>
36#include <linux/root_dev.h>
de79a64d 37#include <linux/clk-provider.h>
9703d9d7
CM
38#include <linux/cpu.h>
39#include <linux/interrupt.h>
40#include <linux/smp.h>
41#include <linux/fs.h>
42#include <linux/proc_fs.h>
43#include <linux/memblock.h>
78d51e0b 44#include <linux/of_iommu.h>
9703d9d7 45#include <linux/of_fdt.h>
d6bafb9b 46#include <linux/of_platform.h>
f84d0275 47#include <linux/efi.h>
44b82b77 48#include <linux/personality.h>
9703d9d7 49
37655163 50#include <asm/acpi.h>
bf4b558e 51#include <asm/fixmap.h>
df857416 52#include <asm/cpu.h>
9703d9d7
CM
53#include <asm/cputype.h>
54#include <asm/elf.h>
55#include <asm/cputable.h>
930da09f 56#include <asm/cpufeature.h>
e8765b26 57#include <asm/cpu_ops.h>
9703d9d7
CM
58#include <asm/sections.h>
59#include <asm/setup.h>
4c7aa002 60#include <asm/smp_plat.h>
9703d9d7
CM
61#include <asm/cacheflush.h>
62#include <asm/tlbflush.h>
63#include <asm/traps.h>
64#include <asm/memblock.h>
e790f1de 65#include <asm/psci.h>
f84d0275 66#include <asm/efi.h>
9703d9d7
CM

/* MIDR_EL1 value of the boot CPU, exported for legacy users. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/* AArch64 ELF hwcap bits reported to userspace via AT_HWCAP. */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline hwcaps that every ARMv8 AArch32 implementation provides;
 * optional bits (crypto, CRC) are probed and ORed in at boot.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Bitmap of detected CPU capability/workaround bits (ARM64_NCAPS wide). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Human-readable name of the boot CPU, set by setup_processor(). */
static const char *cpu_name;
/* Physical address of the DTB, stashed by early boot code before setup_arch(). */
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases; ranges are filled in by request_standard_resources(). */
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
112void __init early_print(const char *str, ...)
113{
114 char buf[256];
115 va_list ap;
116
117 va_start(ap, str);
118 vsnprintf(buf, sizeof(buf), str, ap);
119 va_end(ap);
120
121 printk("%s", buf);
122}

/*
 * Record the boot CPU's hardware ID in the logical map and make percpu
 * accesses safe. Called very early, before setup_arch().
 */
void __init smp_setup_processor_id(void)
{
	/* Keep only the affinity fields of MPIDR_EL1 (mask off U/MT/reserved). */
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
6e15d0e0
SH
138bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
139{
140 return phys_id == cpu_logical_map(cpu);
141}
142
/* Precomputed parameters for hashing an MPIDR into a compact linear index. */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		/* fs[i] = position of lowest toggling bit, 0 if level is constant */
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/*
	 * NOTE(review): flushed to PoC presumably so code running with
	 * caches/MMU off (e.g. CPU resume paths) sees a consistent copy —
	 * confirm against the consumers of mpidr_hash.
	 */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
/*
 * Identify the boot CPU, sanity-check its cache geometry, and probe the
 * ISA feature registers to populate elf_hwcap (and, with CONFIG_COMPAT,
 * the AArch32 compat_elf_hwcap2 bits).
 */
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);	/* unknown CPU: cannot proceed, spin forever */
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	/* Snapshot the boot CPU's ID registers for later comparison. */
	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* Higher values imply all lower features: fall through. */
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* Same incremental-feature encoding as above. */
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
312static void __init setup_machine_fdt(phys_addr_t dt_phys)
313{
d5189cc5 314 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
9703d9d7
CM
315 early_print("\n"
316 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
d5189cc5 317 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
9703d9d7 318 "\nPlease check your bootloader.\n",
d5189cc5 319 dt_phys, phys_to_virt(dt_phys));
9703d9d7
CM
320
321 while (true)
322 cpu_relax();
323 }
5e39977e 324
44b82b77 325 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
9703d9d7
CM
326}
327
9703d9d7
CM
328static void __init request_standard_resources(void)
329{
330 struct memblock_region *region;
331 struct resource *res;
332
333 kernel_code.start = virt_to_phys(_text);
334 kernel_code.end = virt_to_phys(_etext - 1);
335 kernel_data.start = virt_to_phys(_sdata);
336 kernel_data.end = virt_to_phys(_end - 1);
337
338 for_each_memblock(memory, region) {
339 res = alloc_bootmem_low(sizeof(*res));
340 res->name = "System RAM";
341 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
342 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
343 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
344
345 request_resource(&iomem_resource, res);
346
347 if (kernel_code.start >= res->start &&
348 kernel_code.end <= res->end)
349 request_resource(res, &kernel_code);
350 if (kernel_data.start >= res->start &&
351 kernel_data.end <= res->end)
352 request_resource(res, &kernel_data);
353 }
354}
355
/*
 * Logical CPU -> MPIDR affinity map; every slot starts as INVALID_HWID
 * until the corresponding CPU is enumerated.
 */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

/*
 * Architecture-specific boot-time setup, run once on the boot CPU from
 * start_kernel(). Ordering matters throughout: the DTB is scanned before
 * memblock/paging come up, earlycon is parsed before asynchronous aborts
 * are unmasked, and the DT is only unflattened when ACPI is not in use.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/* Early fixmap/ioremap must exist before parse_early_param (earlycon). */
	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	/* Only unflatten the FDT if we are not booting via ACPI. */
	if (acpi_disabled)
		unflatten_device_tree();

	psci_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

/* Initcall: set up DT-described IOMMUs, then populate platform devices. */
static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

9703d9d7
CM
421static int __init topology_init(void)
422{
423 int i;
424
425 for_each_possible_cpu(i) {
df857416 426 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
9703d9d7
CM
427 cpu->hotpluggable = 1;
428 register_cpu(cpu, i);
429 }
430
431 return 0;
432}
433subsys_initcall(topology_init);
434
/*
 * AArch64 hwcap names for /proc/cpuinfo; index order must match the
 * HWCAP_* bit positions in elf_hwcap. NULL-terminated for c_show().
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
/*
 * AArch32 hwcap names for /proc/cpuinfo, indexed by COMPAT_HWCAP_* /
 * COMPAT_HWCAP2_* bit position. c_show() walks these tables until it
 * finds a NULL entry, so both MUST be NULL-terminated: the missing
 * sentinel on compat_hwcap_str caused an out-of-bounds read when a
 * PER_LINUX32 task read /proc/cpuinfo.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

/*
 * seq_file ->show handler for /proc/cpuinfo: emits one stanza per
 * online CPU, with a feature line tailored to the task's personality.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/*
			 * NOTE(review): these loops stop at a NULL entry —
			 * verify each string table is NULL-terminated.
			 */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		/* Decode the MIDR_EL1 fields for the identification lines. */
		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

535static void *c_start(struct seq_file *m, loff_t *pos)
536{
537 return *pos < 1 ? (void *)1 : NULL;
538}
539
540static void *c_next(struct seq_file *m, void *v, loff_t *pos)
541{
542 ++*pos;
543 return NULL;
544}
545
/* Nothing to release: c_start() allocates no per-iteration state. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};