arch/ia64/kernel/setup.c
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 * 	Rohit Seth <rohit.seth@intel.com>
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 * 	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;

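/*
 * A sketch of how these stride shifts are consumed (the real
 * implementation of flush_icache_range() is assembly in
 * arch/ia64/lib/flush.S; this C rendering is illustrative only):
 *
 *	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		ia64_fc((void *)addr);	/- flush-cache instruction -/
 *
 * A stride smaller than the true line size merely over-flushes; one that
 * is too large would skip lines, hence the conservative 32-byte default.
 */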
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
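/*
 * Worked example (illustrative values): an IOMMU with a 4 KB page size
 * would set merge_mask to 0xfff, so two scatterlist buffers meeting at
 * physical 0x10000 are mergeable because 0x10000 & 0xfff == 0.  A
 * hypothetical check, shown only to make the rule concrete:
 *
 *	static int buffers_mergeable(u64 end_a, u64 start_b)
 *	{
 *		return end_a == start_b &&
 *		       !(start_b & ia64_max_iommu_merge_mask);
 *	}
 *
 * With the default merge_mask of ~0UL no boundary passes the test, so
 * merging is never attempted.
 */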

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

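/*
 * Example (hypothetical addresses): with a single reserved region
 * covering [0x2000, 0x3000) and an incoming segment [0x1000, 0x5000),
 * the loop above calls func for [0x1000, 0x2000) and [0x3000, 0x5000),
 * skipping the reserved hole.  The ~0UL end-of-memory marker appended
 * by reserve_memory() guarantees the trailing piece is emitted from
 * inside the loop body, so no post-loop special case is needed.
 */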
/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
			&bss_resource);

	return 0;
}

__initcall(register_memory);

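/*
 * Once registered, these resources show up nested under "System RAM" in
 * /proc/iomem, e.g. (addresses illustrative only):
 *
 *	00100000-3fffffff : System RAM
 *	  04400000-049fffff : Kernel code
 *	  04a00000-04dfffff : Kernel data
 *	  04e00000-050fffff : Kernel bss
 */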

#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

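/*
 * Example: booting with "crashkernel=256M@64M" reserves 256 MB at
 * physical 64 MB for the kdump kernel.  With plain "crashkernel=256M",
 * base stays 0 and kdump_find_rsvd_region() picks a suitable hole
 * between the already collected reserved regions; parse_crashkernel()
 * also understands the memory-dependent form
 * "crashkernel=range1:size1[,range2:size2,...]".
 */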
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

	n += paravirt_reserve_memory(&rsvd_region[n]);

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

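/*
 * Sketch of how a kernel port access resolves through io_space[]; the
 * authoritative translation lives in the asm/io.h helpers, so treat the
 * encoding below as illustrative.  For a sparse space such as the legacy
 * one set up above, port p maps to
 *
 *	mmio_base | ((p >> 2) << 12) | (p & 0xfff)
 *
 * so inb(0x3f8) becomes an uncacheable load from ia64_iobase | 0xfe3f8.
 */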
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

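/*
 * Example: for kdump, kexec-tools appends something like
 * "elfcorehdr=0x40000000" to the capture kernel's command line (memparse
 * also accepts K/M/G suffixes).  reserve_elfcorehdr() then keeps that
 * header region out of the free-memory pool so /proc/vmcore can still
 * read it after boot.
 */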
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	paravirt_arch_setup_early();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
	paravirt_patch_apply();

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#  endif
	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
		32 : cpus_weight(early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_ACPI */

#ifdef CONFIG_SMP
	smp_build_cpu_map();
#endif
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

	paravirt_banner();
	paravirt_arch_setup_console(cmdline_p);

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (paravirt_arch_setup_nomca())
		nomca = 1;
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name),
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m,"\n");

	return 0;
}

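/*
 * Representative /proc/cpuinfo output from the function above (values are
 * illustrative, for a hypothetical family-0x1f system):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong, spontaneous deferral
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2244.60
 *
 * "features" falls back to the literal string "standard" when no feature
 * bit is set, because the first snprintf() overwrites the pre-filled
 * buffer only when a bit matches.
 */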
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;
	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

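/*
 * Worked example for the mask arithmetic above: with the Itanium default
 * impl_va_msb = 50, unimpl_va_mask becomes ~((7L<<61) | ((1L<<51) - 1)) =
 * 0x1ff8000000000000, i.e. exactly virtual-address bits 51-60 are flagged
 * as unimplemented (bits 0-50 are implemented, bits 61-63 select the
 * region).  Likewise phys_addr_size = 44 marks physical bits 44-62 as
 * unimplemented; bit 63 is excluded because it carries the uncacheable
 * attribute in physical addressing mode.
 */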
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void __cpuinit
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for flush_icache_range() */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

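/*
 * Example (illustrative values, not a specific CPU model): if PAL reports
 * a 64-byte L1i line (pcci_line_size = 6, pcci_stride = 6) and a 128-byte
 * unified L3 line (pcci_line_size = 7, pcci_stride = 7), the loop above
 * leaves ia64_i_cache_stride_shift at 6 (64-byte flush steps) and
 * ia64_max_cacheline_size at 128.  Taking the minimum stride is the
 * conservative choice: stepping no wider than the smallest reported
 * stride can only over-flush, never skip a line.
 */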
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *	phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed. head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif
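	/*
	 * Worked example for the ar.k3 arithmetic above (addresses are
	 * illustrative only): if this CPU's per-cpu area sits at physical
	 * 0x4080000 and __per_cpu_start links at 0xa000000000000000, then
	 * ar.k3 = 0x4080000 - 0xa000000000000000 (mod 2^64), and
	 *	phys = ar.k3 + &per_cpu_var
	 * recovers the physical address because &per_cpu_var is
	 * __per_cpu_start plus the variable's offset into the area.
	 */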

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

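	/*
	 * Example for the RID sizing above (illustrative): PAL reporting
	 * rid_size = 24 yields max_ctx = (1 << 21) - 1, since each context
	 * number is combined with a 3-bit region number to form the final
	 * region ID (hence the "- 3").  The cmpxchg loop only ever lowers
	 * ia64_ctx.max_ctx, so after all CPUs have run cpu_init() the
	 * limit is the minimum supported across the whole system.
	 */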
	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);