// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>

#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);
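
/*
 * Illustrative usage (the device number below is an example, not taken
 * from this file): booting with "condev=0x0009" stores 0x0009 in
 * console_devno and invalidates console_irq, so that the console
 * device is later selected by device number.
 */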

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttyS", 1, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
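
/*
 * Illustrative usage: "conmode=sclp" or "conmode=3270" on the kernel
 * command line forces the corresponding console mode, provided the
 * matching console driver is configured in.
 */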

/*
 * Pick a console mode if none was specified on the command line:
 * on z/VM ask CP ("QUERY CONSOLE" / "QUERY TERM"), under KVM decide
 * by the available SCLP facilities, and otherwise fall back to the
 * SCLP console.
 */
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
	if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
		conswitchp = &dummy_con;
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
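
/*
 * Illustrative usage (the size is an example): "vmalloc=512G" requests
 * a 512GB vmalloc area; the page-aligned value is parked in VMALLOC_END
 * and picked up as the vmalloc size in setup_memory_end() below.
 */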

void *restart_stack __section(.data);

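/*
 * A short sketch of the stack handling below: with CONFIG_VMAP_STACK
 * the kernel stacks live in the vmalloc area, so an overflow hits a
 * guard page instead of silently corrupting neighbouring data; without
 * it, stacks are plain physically contiguous page allocations.
 */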
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)
		__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL, 0, NUMA_NO_NODE,
				     __builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

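/*
 * arch_early_irq_init() above runs before the vmalloc area is usable,
 * so the first async stack comes straight from the page allocator.
 * This early initcall swaps it for a stack_alloc() stack (a vmalloc
 * stack when CONFIG_VMAP_STACK is set) and frees the early one.
 */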
static int __init async_stack_realloc(void)
{
	unsigned long old, new;

	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
	new = stack_alloc();
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);

void __init arch_call_rest_init(void)
{
	struct stack_frame *frame;
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	frame = (struct stack_frame *) stack;
	memset(frame, 0, sizeof(*frame));
	/* Branch to rest_init on the new stack, never returns */
	asm volatile(
		"	la	15,0(%[_frame])\n"
		"	jg	rest_init\n"
		: : [_frame] "a" (frame));
}

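/*
 * Background for setup_lowcore(): on s390 the lowcore is the per-CPU
 * prefix area, i.e. the memory the CPU sees at absolute address 0.
 * The hardware fetches the "new PSWs" set below from fixed offsets in
 * this area whenever an interruption (external, svc, program check,
 * machine check, I/O, restart) is delivered, so filling it in and
 * calling set_prefix() effectively installs the kernel's interrupt
 * handlers for the boot CPU.
 */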
static void __init setup_lowcore(void)
{
	struct lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	nmi_alloc_boot_cpu(lc);
	vdso_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

#ifdef CONFIG_SMP
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
#endif
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = memblock_alloc(sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}
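
/*
 * An illustrative result of setup_resources() as it would appear in
 * /proc/iomem (all addresses below are invented for illustration):
 *
 *   00000000-7fffffff : System RAM
 *     00100000-008fffff : Kernel code
 *     00900000-00abffff : Kernel data
 *     00b00000-00bfffff : Kernel bss
 */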

static void __init setup_memory_end(void)
{
	unsigned long vmax, vmalloc_size, tmp;

	/* Choose kernel address space layout: 3 or 4 levels. */
	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN)) {
		vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
			? _REGION1_SIZE
			: _REGION2_SIZE;
	} else {
		tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
		if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
			vmax = _REGION2_SIZE; /* 3-level kernel page table */
		else
			vmax = _REGION1_SIZE; /* 4-level kernel page table */
	}

	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
	/* fit in kasan shadow memory region between 1:1 and vmemmap */
	memory_end = min(memory_end, KASAN_SHADOW_START);
	vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
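
/*
 * The resulting kernel address space layout, from low to high (the
 * KASAN shadow is only present with CONFIG_KASAN):
 *
 *   0 .. memory_end                 1:1 mapping of physical memory
 *   [KASAN shadow]
 *   vmemmap .. VMALLOC_START        struct page array
 *   VMALLOC_START .. VMALLOC_END    vmalloc area
 *   MODULES_VADDR .. MODULES_END    module area (ends at vmax)
 */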

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void reserve_memory_end(void)
{
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Reserve all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove oldmem, where the dump is stored, from the detected memory
 */
static void remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}
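
/*
 * Illustrative usage (size and offset are examples): "crashkernel=256M"
 * on the kernel command line asks reserve_crashkernel() to carve out
 * 256MB for the kdump kernel; "crashkernel=256M@1G" additionally pins
 * the base address, which must pass the checks above.
 */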

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
		     start, start + size - 1);
	memblock_add_range(&memblock.memory, start, size, 0, 0);
	memblock_add_range(&memblock.physmem, start, size, 0, 0);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	memblock_dbg("physmem info source: %s (%hhd)\n",
		     get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end)
		memblock_physmem_add(start, end - start);
	memblock_set_bottom_up(false);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

#ifdef CONFIG_DMA_API_DEBUG
	/*
	 * DMA_API_DEBUG code stumbles over addresses from the
	 * range [PARMAREA_END, _stext]. Mark the memory as reserved
	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
	 */
	memblock_reserve(0, PFN_PHYS(start_pfn));
#else
	memblock_reserve(0, PARMAREA_END);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
#endif
}

static void __init setup_memory(void)
{
	struct memblock_region *reg;

	/*
	 * Init storage key for present memory
	 */
	for_each_memblock(memory, reg) {
		storage_key_init_range(reg->base, reg->base + reg->size);
	}
	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store-facility-list bits, as defined in the Principles
	 * of Operation, are numbered with bit 1UL<<31 as number 0 and
	 * bit 1UL<<0 as number 31.
	 * Bit 0: instructions named N3, "backported" to esa-mode
	 * Bit 2: z/Architecture mode is active
	 * Bit 7: the store-facility-list-extended facility is installed
	 * Bit 17: the message-security assist is installed
	 * Bit 19: the long-displacement facility is installed
	 * Bit 21: the extended-immediate facility is installed
	 * Bit 22: extended-translation facility 3 is installed
	 * Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 * Bit 42: decimal floating point facility is installed
	 * Bit 44: perform floating point operation facility is installed
	 * translated to:
	 * HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_S390_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_S390_VXRS_EXT;
		if (test_facility(135))
			elf_hwcap |= HWCAP_S390_VXRS_BCD;
	}

	/*
	 * Guarded storage support HWCAP_S390_GS is bit 12.
	 */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_S390_GS;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	}

	/*
	 * Virtualization support HWCAP_INT_SIE is bit 0.
	 */
	if (sclp.has_sief2)
		int_hwcap |= HWCAP_INT_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
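
/*
 * Userspace consumes elf_hwcap via the ELF auxiliary vector. A minimal
 * sketch of a consumer (assuming a header that provides the
 * HWCAP_S390_VXRS bit used above):
 *
 *	#include <sys/auxv.h>
 *	int have_vx = (getauxval(AT_HWCAP) & HWCAP_S390_VXRS) != 0;
 */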

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	/* Is init_mm really needed? */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	remove_oldmem();

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 *
	 * Is this still required?
	 */
	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));

	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(memory_end);
	vmcp_cma_reserve();

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcpdump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}