]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/arm64/kernel/setup.c
arm64: alternatives: add enable parameter to conditional asm macros
[mirror_ubuntu-artful-kernel.git] / arch / arm64 / kernel / setup.c
CommitLineData
9703d9d7
CM
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
37655163 20#include <linux/acpi.h>
9703d9d7
CM
21#include <linux/export.h>
22#include <linux/kernel.h>
23#include <linux/stddef.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/utsname.h>
27#include <linux/initrd.h>
28#include <linux/console.h>
a41dc0e8 29#include <linux/cache.h>
9703d9d7
CM
30#include <linux/bootmem.h>
31#include <linux/seq_file.h>
32#include <linux/screen_info.h>
33#include <linux/init.h>
34#include <linux/kexec.h>
35#include <linux/crash_dump.h>
36#include <linux/root_dev.h>
de79a64d 37#include <linux/clk-provider.h>
9703d9d7
CM
38#include <linux/cpu.h>
39#include <linux/interrupt.h>
40#include <linux/smp.h>
41#include <linux/fs.h>
42#include <linux/proc_fs.h>
43#include <linux/memblock.h>
78d51e0b 44#include <linux/of_iommu.h>
9703d9d7 45#include <linux/of_fdt.h>
d6bafb9b 46#include <linux/of_platform.h>
f84d0275 47#include <linux/efi.h>
44b82b77 48#include <linux/personality.h>
9703d9d7 49
37655163 50#include <asm/acpi.h>
bf4b558e 51#include <asm/fixmap.h>
df857416 52#include <asm/cpu.h>
9703d9d7
CM
53#include <asm/cputype.h>
54#include <asm/elf.h>
930da09f 55#include <asm/cpufeature.h>
e8765b26 56#include <asm/cpu_ops.h>
9703d9d7
CM
57#include <asm/sections.h>
58#include <asm/setup.h>
4c7aa002 59#include <asm/smp_plat.h>
9703d9d7
CM
60#include <asm/cacheflush.h>
61#include <asm/tlbflush.h>
62#include <asm/traps.h>
63#include <asm/memblock.h>
e790f1de 64#include <asm/psci.h>
f84d0275 65#include <asm/efi.h>
667f3fd3 66#include <asm/virt.h>
5882bfef 67#include <asm/xen/hypervisor.h>
9703d9d7 68
25804e6a 69unsigned long elf_hwcap __read_mostly;
9703d9d7
CM
70EXPORT_SYMBOL_GPL(elf_hwcap);
71
46efe547
SH
72#ifdef CONFIG_COMPAT
73#define COMPAT_ELF_HWCAP_DEFAULT \
74 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
75 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
76 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
77 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
7d57511d
CM
78 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
79 COMPAT_HWCAP_LPAE)
46efe547 80unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
28964d32 81unsigned int compat_elf_hwcap2 __read_mostly;
46efe547
SH
82#endif
83
06f9eb88 84DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
930da09f 85
9703d9d7
CM
86phys_addr_t __fdt_pointer __initdata;
87
88/*
89 * Standard memory resources
90 */
91static struct resource mem_res[] = {
92 {
93 .name = "Kernel code",
94 .start = 0,
95 .end = 0,
96 .flags = IORESOURCE_MEM
97 },
98 {
99 .name = "Kernel data",
100 .start = 0,
101 .end = 0,
102 .flags = IORESOURCE_MEM
103 }
104};
105
106#define kernel_code mem_res[0]
107#define kernel_data mem_res[1]
108
da9c177d
AB
109/*
110 * The recorded values of x0 .. x3 upon kernel entry.
111 */
112u64 __cacheline_aligned boot_args[4];
113
71586276
WD
114void __init smp_setup_processor_id(void)
115{
80708677
MR
116 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
117 cpu_logical_map(0) = mpidr;
118
71586276
WD
119 /*
120 * clear __my_cpu_offset on boot CPU to avoid hang caused by
121 * using percpu variable early, for example, lockdep will
122 * access percpu variable inside lock_release
123 */
124 set_my_cpu_offset(0);
80708677 125 pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
71586276
WD
126}
127
6e15d0e0
SH
128bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
129{
130 return phys_id == cpu_logical_map(cpu);
131}
132
976d7d3f 133struct mpidr_hash mpidr_hash;
976d7d3f
LP
134/**
135 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
136 * level in order to build a linear index from an
137 * MPIDR value. Resulting algorithm is a collision
138 * free hash carried out through shifting and ORing
139 */
140static void __init smp_build_mpidr_hash(void)
141{
142 u32 i, affinity, fs[4], bits[4], ls;
143 u64 mask = 0;
144 /*
145 * Pre-scan the list of MPIDRS and filter out bits that do
146 * not contribute to affinity levels, ie they never toggle.
147 */
148 for_each_possible_cpu(i)
149 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
150 pr_debug("mask of set bits %#llx\n", mask);
151 /*
152 * Find and stash the last and first bit set at all affinity levels to
153 * check how many bits are required to represent them.
154 */
155 for (i = 0; i < 4; i++) {
156 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
157 /*
158 * Find the MSB bit and LSB bits position
159 * to determine how many bits are required
160 * to express the affinity level.
161 */
162 ls = fls(affinity);
163 fs[i] = affinity ? ffs(affinity) - 1 : 0;
164 bits[i] = ls - fs[i];
165 }
166 /*
167 * An index can be created from the MPIDR_EL1 by isolating the
168 * significant bits at each affinity level and by shifting
169 * them in order to compress the 32 bits values space to a
170 * compressed set of values. This is equivalent to hashing
171 * the MPIDR_EL1 through shifting and ORing. It is a collision free
172 * hash though not minimal since some levels might contain a number
173 * of CPUs that is not an exact power of 2 and their bit
174 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
175 */
176 mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
177 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
178 mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
179 (bits[1] + bits[0]);
180 mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
181 fs[3] - (bits[2] + bits[1] + bits[0]);
182 mpidr_hash.mask = mask;
183 mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
184 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
185 mpidr_hash.shift_aff[0],
186 mpidr_hash.shift_aff[1],
187 mpidr_hash.shift_aff[2],
188 mpidr_hash.shift_aff[3],
189 mpidr_hash.mask,
190 mpidr_hash.bits);
191 /*
192 * 4x is an arbitrary value used to warn on a hash table much bigger
193 * than expected on most systems.
194 */
195 if (mpidr_hash_size() > 4 * num_possible_cpus())
196 pr_warn("Large number of MPIDR hash buckets detected\n");
197 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
198}
976d7d3f 199
667f3fd3
MR
200static void __init hyp_mode_check(void)
201{
202 if (is_hyp_mode_available())
203 pr_info("CPU: All CPU(s) started at EL2\n");
204 else if (is_hyp_mode_mismatched())
205 WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
206 "CPU: CPUs started in inconsistent modes");
207 else
208 pr_info("CPU: All CPU(s) started at EL1\n");
209}
210
137650aa
MR
211void __init do_post_cpus_up_work(void)
212{
667f3fd3 213 hyp_mode_check();
137650aa
MR
214 apply_alternatives_all();
215}
216
#ifdef CONFIG_UP_LATE_INIT
/* On !SMP kernels there is no secondary bringup; run the hook directly. */
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */
223
9703d9d7
CM
224static void __init setup_processor(void)
225{
4bff28cc 226 u64 features, block;
a41dc0e8
CM
227 u32 cwg;
228 int cls;
9703d9d7 229
a591ede4
MZ
230 printk("CPU: AArch64 Processor [%08x] revision %d\n",
231 read_cpuid_id(), read_cpuid_id() & 15);
9703d9d7 232
94ed1f2c 233 sprintf(init_utsname()->machine, ELF_PLATFORM);
9703d9d7 234 elf_hwcap = 0;
4bff28cc 235
df857416
MR
236 cpuinfo_store_boot_cpu();
237
a41dc0e8
CM
238 /*
239 * Check for sane CTR_EL0.CWG value.
240 */
241 cwg = cache_type_cwg();
242 cls = cache_line_size();
243 if (!cwg)
244 pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
245 cls);
246 if (L1_CACHE_BYTES < cls)
247 pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
248 L1_CACHE_BYTES, cls);
249
4bff28cc
SC
250 /*
251 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
252 * The blocks we test below represent incremental functionality
253 * for non-negative values. Negative values are reserved.
254 */
255 features = read_cpuid(ID_AA64ISAR0_EL1);
256 block = (features >> 4) & 0xf;
257 if (!(block & 0x8)) {
258 switch (block) {
259 default:
260 case 2:
261 elf_hwcap |= HWCAP_PMULL;
262 case 1:
263 elf_hwcap |= HWCAP_AES;
264 case 0:
265 break;
266 }
267 }
268
269 block = (features >> 8) & 0xf;
270 if (block && !(block & 0x8))
271 elf_hwcap |= HWCAP_SHA1;
272
273 block = (features >> 12) & 0xf;
274 if (block && !(block & 0x8))
275 elf_hwcap |= HWCAP_SHA2;
276
277 block = (features >> 16) & 0xf;
278 if (block && !(block & 0x8))
279 elf_hwcap |= HWCAP_CRC32;
4cf761cd
AB
280
281#ifdef CONFIG_COMPAT
282 /*
283 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
284 * the Aarch32 32-bit execution state.
285 */
286 features = read_cpuid(ID_ISAR5_EL1);
287 block = (features >> 4) & 0xf;
288 if (!(block & 0x8)) {
289 switch (block) {
290 default:
291 case 2:
292 compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
293 case 1:
294 compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
295 case 0:
296 break;
297 }
298 }
299
300 block = (features >> 8) & 0xf;
301 if (block && !(block & 0x8))
302 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
303
304 block = (features >> 12) & 0xf;
305 if (block && !(block & 0x8))
306 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
307
308 block = (features >> 16) & 0xf;
309 if (block && !(block & 0x8))
310 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
311#endif
9703d9d7
CM
312}
313
314static void __init setup_machine_fdt(phys_addr_t dt_phys)
315{
61bd93ce
AB
316 void *dt_virt = fixmap_remap_fdt(dt_phys);
317
318 if (!dt_virt || !early_init_dt_scan(dt_virt)) {
319 pr_crit("\n"
320 "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
321 "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
322 "\nPlease check your bootloader.",
323 &dt_phys, dt_virt);
9703d9d7
CM
324
325 while (true)
326 cpu_relax();
327 }
5e39977e 328
44b82b77 329 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
9703d9d7
CM
330}
331
9703d9d7
CM
332static void __init request_standard_resources(void)
333{
334 struct memblock_region *region;
335 struct resource *res;
336
337 kernel_code.start = virt_to_phys(_text);
338 kernel_code.end = virt_to_phys(_etext - 1);
339 kernel_data.start = virt_to_phys(_sdata);
340 kernel_data.end = virt_to_phys(_end - 1);
341
342 for_each_memblock(memory, region) {
343 res = alloc_bootmem_low(sizeof(*res));
344 res->name = "System RAM";
345 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
346 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
347 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
348
349 request_resource(&iomem_resource, res);
350
351 if (kernel_code.start >= res->start &&
352 kernel_code.end <= res->end)
353 request_resource(res, &kernel_code);
354 if (kernel_data.start >= res->start &&
355 kernel_data.end <= res->end)
356 request_resource(res, &kernel_data);
357 }
358}
359
4c7aa002
JM
360u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
361
9703d9d7
CM
362void __init setup_arch(char **cmdline_p)
363{
364 setup_processor();
365
9703d9d7
CM
366 init_mm.start_code = (unsigned long) _text;
367 init_mm.end_code = (unsigned long) _etext;
368 init_mm.end_data = (unsigned long) _edata;
369 init_mm.brk = (unsigned long) _end;
370
371 *cmdline_p = boot_command_line;
372
af86e597 373 early_fixmap_init();
bf4b558e 374 early_ioremap_init();
0bf757c7 375
61bd93ce
AB
376 setup_machine_fdt(__fdt_pointer);
377
9703d9d7
CM
378 parse_early_param();
379
7a9c43be
JM
380 /*
381 * Unmask asynchronous aborts after bringing up possible earlycon.
382 * (Report possible System Errors once we can report this occurred)
383 */
384 local_async_enable();
385
f84d0275 386 efi_init();
9703d9d7
CM
387 arm64_memblock_init();
388
37655163
AS
389 /* Parse the ACPI tables for possible boot-time configuration */
390 acpi_boot_table_init();
391
9703d9d7
CM
392 paging_init();
393 request_standard_resources();
394
0e63ea48 395 early_ioremap_reset();
f84d0275 396
fb094eb1 397 if (acpi_disabled) {
3505f30f 398 unflatten_device_tree();
7c59a3df
GG
399 psci_dt_init();
400 } else {
401 psci_acpi_init();
402 }
5882bfef 403 xen_early_init();
e790f1de 404
0f078336 405 cpu_read_bootcpu_ops();
0f078336 406 smp_init_cpus();
976d7d3f 407 smp_build_mpidr_hash();
9703d9d7
CM
408
409#ifdef CONFIG_VT
410#if defined(CONFIG_VGA_CONSOLE)
411 conswitchp = &vga_con;
412#elif defined(CONFIG_DUMMY_CONSOLE)
413 conswitchp = &dummy_con;
414#endif
415#endif
da9c177d
AB
416 if (boot_args[1] || boot_args[2] || boot_args[3]) {
417 pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
418 "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
419 "This indicates a broken bootloader or old kernel\n",
420 boot_args[1], boot_args[2], boot_args[3]);
421 }
9703d9d7
CM
422}
423
c560ecfe 424static int __init arm64_device_init(void)
de79a64d 425{
78d51e0b 426 of_iommu_init();
c560ecfe 427 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
de79a64d
CM
428 return 0;
429}
6ecba8eb 430arch_initcall_sync(arm64_device_init);
de79a64d 431
9703d9d7
CM
432static int __init topology_init(void)
433{
434 int i;
435
436 for_each_possible_cpu(i) {
df857416 437 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
9703d9d7
CM
438 cpu->hotpluggable = 1;
439 register_cpu(cpu, i);
440 }
441
442 return 0;
443}
444subsys_initcall(topology_init);
445
/*
 * AArch64 hwcap names, indexed by HWCAP_* bit position; NULL-terminated
 * because c_show() iterates until it hits the sentinel.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
457
44b82b77
MR
458#ifdef CONFIG_COMPAT
/*
 * AArch32 compat hwcap names, indexed by COMPAT_HWCAP_* bit position.
 * BUGFIX: the original table had no NULL sentinel, but c_show() walks it
 * with `for (j = 0; compat_hwcap_str[j]; j++)` — an out-of-bounds read
 * past the end of the array. Terminate it like hwcap_str and
 * compat_hwcap2_str.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

/* COMPAT_HWCAP2_* bit names; already NULL-terminated in the original. */
static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
492#endif /* CONFIG_COMPAT */
493
9703d9d7
CM
494static int c_show(struct seq_file *m, void *v)
495{
44b82b77 496 int i, j;
9703d9d7
CM
497
498 for_each_online_cpu(i) {
44b82b77
MR
499 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
500 u32 midr = cpuinfo->reg_midr;
501
9703d9d7
CM
502 /*
503 * glibc reads /proc/cpuinfo to determine the number of
504 * online processors, looking for lines beginning with
505 * "processor". Give glibc what it expects.
506 */
9703d9d7 507 seq_printf(m, "processor\t: %d\n", i);
5e39977e 508
44b82b77
MR
509 /*
510 * Dump out the common processor features in a single line.
511 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
512 * rather than attempting to parse this, but there's a body of
513 * software which does already (at least for 32-bit).
514 */
515 seq_puts(m, "Features\t:");
516 if (personality(current->personality) == PER_LINUX32) {
517#ifdef CONFIG_COMPAT
518 for (j = 0; compat_hwcap_str[j]; j++)
519 if (compat_elf_hwcap & (1 << j))
520 seq_printf(m, " %s", compat_hwcap_str[j]);
521
522 for (j = 0; compat_hwcap2_str[j]; j++)
523 if (compat_elf_hwcap2 & (1 << j))
524 seq_printf(m, " %s", compat_hwcap2_str[j]);
525#endif /* CONFIG_COMPAT */
526 } else {
527 for (j = 0; hwcap_str[j]; j++)
528 if (elf_hwcap & (1 << j))
529 seq_printf(m, " %s", hwcap_str[j]);
530 }
531 seq_puts(m, "\n");
532
533 seq_printf(m, "CPU implementer\t: 0x%02x\n",
534 MIDR_IMPLEMENTOR(midr));
535 seq_printf(m, "CPU architecture: 8\n");
536 seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
537 seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
538 seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
539 }
5e39977e 540
9703d9d7
CM
541 return 0;
542}
543
544static void *c_start(struct seq_file *m, loff_t *pos)
545{
546 return *pos < 1 ? (void *)1 : NULL;
547}
548
549static void *c_next(struct seq_file *m, void *v, loff_t *pos)
550{
551 ++*pos;
552 return NULL;
553}
554
/* Nothing to release: c_start allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
558
559const struct seq_operations cpuinfo_op = {
560 .start = c_start,
561 .next = c_next,
562 .stop = c_stop,
563 .show = c_show
564};