]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/arm64/kernel/setup.c
arm64: remove __calc_phys_offset
[mirror_ubuntu-bionic-kernel.git] / arch / arm64 / kernel / setup.c
CommitLineData
9703d9d7
CM
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/ioport.h>
24#include <linux/delay.h>
25#include <linux/utsname.h>
26#include <linux/initrd.h>
27#include <linux/console.h>
a41dc0e8 28#include <linux/cache.h>
9703d9d7
CM
29#include <linux/bootmem.h>
30#include <linux/seq_file.h>
31#include <linux/screen_info.h>
32#include <linux/init.h>
33#include <linux/kexec.h>
34#include <linux/crash_dump.h>
35#include <linux/root_dev.h>
de79a64d 36#include <linux/clk-provider.h>
9703d9d7
CM
37#include <linux/cpu.h>
38#include <linux/interrupt.h>
39#include <linux/smp.h>
40#include <linux/fs.h>
41#include <linux/proc_fs.h>
42#include <linux/memblock.h>
78d51e0b 43#include <linux/of_iommu.h>
9703d9d7 44#include <linux/of_fdt.h>
d6bafb9b 45#include <linux/of_platform.h>
f84d0275 46#include <linux/efi.h>
44b82b77 47#include <linux/personality.h>
9703d9d7 48
bf4b558e 49#include <asm/fixmap.h>
df857416 50#include <asm/cpu.h>
9703d9d7
CM
51#include <asm/cputype.h>
52#include <asm/elf.h>
930da09f 53#include <asm/cpufeature.h>
e8765b26 54#include <asm/cpu_ops.h>
9703d9d7
CM
55#include <asm/sections.h>
56#include <asm/setup.h>
4c7aa002 57#include <asm/smp_plat.h>
9703d9d7
CM
58#include <asm/cacheflush.h>
59#include <asm/tlbflush.h>
60#include <asm/traps.h>
61#include <asm/memblock.h>
e790f1de 62#include <asm/psci.h>
f84d0275 63#include <asm/efi.h>
667f3fd3 64#include <asm/virt.h>
9703d9d7 65
/* AArch64 hwcap bits advertised to userspace via AT_HWCAP (filled in setup_processor()). */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 hwcaps assumed for every arm64 CPU; optional bits
 * (e.g. crypto extensions in compat_elf_hwcap2) are detected at boot.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Per-capability bits for detected ARM64 CPU features (ARM64_NCAPS entries). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Physical address of the device tree blob, handed over by the boot code. */
phys_addr_t __fdt_pointer __initdata;
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Aliases; .start/.end are filled in by request_standard_resources(). */
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
106void __init early_print(const char *str, ...)
107{
108 char buf[256];
109 va_list ap;
110
111 va_start(ap, str);
112 vsnprintf(buf, sizeof(buf), str, ap);
113 va_end(ap);
114
115 printk("%s", buf);
116}
117
/*
 * Record the boot CPU's MPIDR hardware id as logical CPU 0 and make
 * early per-cpu variable accesses safe on the boot CPU.
 */
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
131
6e15d0e0
SH
132bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
133{
134 return phys_id == cpu_logical_map(cpu);
135}
136
/* Shift/mask parameters for hashing MPIDR values into a linear index. */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/*
	 * NOTE(review): the explicit dcache flush suggests mpidr_hash is read
	 * with caches/MMU off (e.g. early resume paths) — confirm with callers.
	 */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
205
667f3fd3
MR
206static void __init hyp_mode_check(void)
207{
208 if (is_hyp_mode_available())
209 pr_info("CPU: All CPU(s) started at EL2\n");
210 else if (is_hyp_mode_mismatched())
211 WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
212 "CPU: CPUs started in inconsistent modes");
213 else
214 pr_info("CPU: All CPU(s) started at EL1\n");
215}
216
/*
 * Work run once all CPUs have been brought up: report the boot
 * exception level and apply alternative-instruction patches system-wide.
 */
void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}
222
#ifdef CONFIG_UP_LATE_INIT
/* UP kernels never bring up secondaries; run the post-CPUs-up work here. */
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */
229
/*
 * Identify the boot CPU, sanity-check its cache geometry against the
 * kernel's compile-time L1_CACHE_BYTES, and populate elf_hwcap (and,
 * for AArch32 tasks, compat_elf_hwcap2) from the ID registers.
 */
static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	/* Snapshot the boot CPU's ID registers into the per-cpu cpuinfo. */
	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	/* AES field: 1 = AES, 2 = AES + PMULL (higher values imply lower). */
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
319
/*
 * Validate and scan the flattened device tree passed by the bootloader.
 * There is no way to continue booting without one, so on failure this
 * prints a diagnostic via early_print() and spins forever.
 */
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		/* Cannot boot without a DT: halt here. */
		while (true)
			cpu_relax();
	}

	/* Record the machine name for oops/dump_stack banners. */
	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
335
/*
 * Register every memblock memory region as "System RAM" in the iomem
 * resource tree (visible in /proc/iomem), nesting the kernel code and
 * data resources inside whichever region contains them.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest kernel code/data under the RAM region that covers them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
363
/* Logical cpu number -> MPIDR hardware id map; every slot starts invalid. */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
/*
 * Arch-specific boot-time setup, called from start_kernel(). The call
 * order below is significant: the FDT must be scanned before memblock
 * is initialized, paging before resources are requested, and earlycon
 * (via parse_early_param) before async aborts are unmasked.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	unflatten_device_tree();

	psci_init();

	/* Determine how the boot CPU (and later secondaries) are started. */
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
416
/* Initcall: set up DT-described IOMMUs, then populate platform devices. */
static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);
de79a64d 424
9703d9d7
CM
425static int __init topology_init(void)
426{
427 int i;
428
429 for_each_possible_cpu(i) {
df857416 430 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
9703d9d7
CM
431 cpu->hotpluggable = 1;
432 register_cpu(cpu, i);
433 }
434
435 return 0;
436}
437subsys_initcall(topology_init);
438
/*
 * AArch64 hwcap names, indexed by HWCAP_* bit position; printed on the
 * /proc/cpuinfo "Features" line by c_show(), which iterates until NULL.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
/* AArch32 COMPAT_HWCAP_* names, indexed by bit position. */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	/*
	 * Fix: NULL sentinel was missing; c_show() iterates this array with
	 * "for (j = 0; compat_hwcap_str[j]; j++)", so without it the loop
	 * reads past the end of the array for PER_LINUX32 tasks.
	 */
	NULL
};

/* AArch32 COMPAT_HWCAP2_* names, indexed by bit position. */
static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */
486
/*
 * seq_file show callback for /proc/cpuinfo: one stanza per online CPU.
 * The "Features" line is built from compat_elf_hwcap/compat_elf_hwcap2
 * for tasks with a PER_LINUX32 personality, otherwise from elf_hwcap,
 * and the CPU identity fields are decoded from the cached MIDR.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/* These loops require the *_hwcap_str arrays to be NULL-terminated. */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
538
539static void *c_start(struct seq_file *m, loff_t *pos)
540{
541 return *pos < 1 ? (void *)1 : NULL;
542}
543
544static void *c_next(struct seq_file *m, void *v, loff_t *pos)
545{
546 ++*pos;
547 return NULL;
548}
549
550static void c_stop(struct seq_file *m, void *v)
551{
552}
553
554const struct seq_operations cpuinfo_op = {
555 .start = c_start,
556 .next = c_next,
557 .stop = c_stop,
558 .show = c_show
559};