/* arch/arm64/kernel/setup.c (mirror_ubuntu-bionic-kernel.git, at commit "arm64: elf: advertise 8.1 atomic instructions as new hwcap") */
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
#include <asm/virt.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
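/*
 * Baseline hwcaps reported to 32-bit (compat) processes; these AArch32
 * features are taken for granted on any CPU this kernel will run on.
 */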
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

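/* Kernel-internal CPU capability bits, distinct from the user-visible elf_hwcap. */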
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them so as to compress the 32-bit value space into a smaller,
	 * dense set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
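	/*
	 * Illustrative example: with two CPUs whose MPIDR_EL1 values are
	 * 0x0 and 0x1, only bit 0 of Aff0 ever toggles, so fs[0] = 0 and
	 * bits[0] = 1, and the hash maps each MPIDR to a one-bit index
	 * with exactly two buckets.
	 */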
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}

#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */

static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
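		/*
		 * The fall-through is deliberate: the block values are
		 * cumulative, so a value of 2 (PMULL) also implies the
		 * AES support that a value of 1 advertises.
		 */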
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

	block = (features >> 20) & 0xf;
	if (!(block & 0x8)) {
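		/*
		 * A block value of 2 here advertises the ARMv8.1 LSE atomic
		 * instructions (LDADD, SWP, CAS, ...); the value 1 is
		 * reserved, hence the empty case below.
		 */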
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information to the above, but
	 * pertains to the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

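		/* There is no way to continue booting without a valid device tree. */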
		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

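	/*
	 * Publish each RAM region in /proc/iomem and nest the kernel
	 * text/data resources inside whichever region contains them.
	 */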
	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

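/*
 * Logical-to-physical CPU map; each entry holds a CPU's MPIDR hardware ID
 * and stays INVALID_HWID until that CPU is enumerated.
 */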
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

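	/*
	 * The FDT must be scanned before parse_early_param() below, since
	 * on DT systems it is what supplies boot_command_line.
	 */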
	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

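/* Probe IOMMUs and create platform devices from the DT, if one was provided. */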
static int __init arm64_device_init(void)
{
	if (of_have_populated_dt()) {
		of_iommu_init();
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
	} else if (acpi_disabled) {
		pr_crit("Device tree not populated\n");
	}
	return 0;
}
arch_initcall_sync(arm64_device_init);

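/*
 * Register each possible CPU with sysfs; marking them hotpluggable allows
 * userspace to bring CPUs online and offline via /sys/devices/system/cpu.
 */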
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL	/* the c_show() loop below relies on NULL termination */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

512
513 static int c_show(struct seq_file *m, void *v)
514 {
515 int i, j;
516
517 for_each_online_cpu(i) {
518 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
519 u32 midr = cpuinfo->reg_midr;
520
521 /*
522 * glibc reads /proc/cpuinfo to determine the number of
523 * online processors, looking for lines beginning with
524 * "processor". Give glibc what it expects.
525 */
526 seq_printf(m, "processor\t: %d\n", i);
527
528 /*
529 * Dump out the common processor features in a single line.
530 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
531 * rather than attempting to parse this, but there's a body of
532 * software which does already (at least for 32-bit).
533 */
534 seq_puts(m, "Features\t:");
535 if (personality(current->personality) == PER_LINUX32) {
536 #ifdef CONFIG_COMPAT
537 for (j = 0; compat_hwcap_str[j]; j++)
538 if (compat_elf_hwcap & (1 << j))
539 seq_printf(m, " %s", compat_hwcap_str[j]);
540
541 for (j = 0; compat_hwcap2_str[j]; j++)
542 if (compat_elf_hwcap2 & (1 << j))
543 seq_printf(m, " %s", compat_hwcap2_str[j]);
544 #endif /* CONFIG_COMPAT */
545 } else {
546 for (j = 0; hwcap_str[j]; j++)
547 if (elf_hwcap & (1 << j))
548 seq_printf(m, " %s", hwcap_str[j]);
549 }
550 seq_puts(m, "\n");
551
552 seq_printf(m, "CPU implementer\t: 0x%02x\n",
553 MIDR_IMPLEMENTOR(midr));
554 seq_printf(m, "CPU architecture: 8\n");
555 seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
556 seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
557 seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
558 }
559
560 return 0;
561 }
562
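/*
 * All of the output is produced by a single c_show() call, so the iterator
 * below yields exactly one position and then stops.
 */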
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};