/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

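/*
 * Report the largest cache line size seen on this machine; callers use it
 * as the minimum alignment for DMA buffers.
 */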
int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size
 * to use when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * This mask specifies a mask of address bits that must be 0 in order for two
 * buffers to be mergeable by the I/O MMU (i.e., the end address of the first
 * buffer and the start address of the second buffer must be aligned to
 * (merge_mask+1) in order to be mergeable).  By default, we assume there is
 * no I/O MMU which can merge physically discontiguous buffers, so we set the
 * merge_mask to ~0UL, which corresponds to an iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;

/*
 * Filter incoming memory segments based on the primitive map created from the
 * boot parameters.  Segments contained in the map are removed from the memory
 * ranges.  A caller-specified function is called with the memory ranges that
 * remain after filtering.  This routine does not assume the incoming segments
 * are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(unsigned long start, unsigned long end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

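/*
 * Order the reserved regions by start address.  A simple bubble sort is
 * plenty here: there are at most IA64_MAX_RSVD_REGIONS + 1 entries.
 */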
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
				       &bss_resource);

	return 0;
}

__initcall(register_memory);

#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

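/*
 * Parse "crashkernel=" from the command line and, if a usable window exists,
 * record it both as a reserved region and in crashk_res.  When no base is
 * given, pick one from the gaps between the already-reserved regions.
 */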
static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must be below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_PROC_VMCORE
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

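/*
 * Establish the kernel's view of the legacy I/O port space.  The base comes
 * from the EFI memory map when available, otherwise from the value firmware
 * left in ar.k0.
 */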
static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the *physical*
	 * address in ar.k0 to mmap the appropriate area from /dev/mem, and
	 * the inX()/outX() interfaces use MMIO.  In both cases, user-mode
	 * can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

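/* "nomca" on the command line disables Machine Check Abort handling below. */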
static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		elfcorehdr_addr = ELFCORE_ADDR_MAX;
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_PROC_VMCORE */

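/*
 * Architecture-specific boot entry point, called from start_kernel().  It
 * wires up the EFI, SAL, and ACPI tables, reserves boot memory, and brings
 * the bootstrap CPU to a usable state.
 */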
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
		32 : cpus_weight(early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		u64 num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	check_sal_cache_flush();
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

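/*
 * seq_file iterator for /proc/cpuinfo: walk the online CPUs, handing each
 * cpuinfo_ia64 to show_cpuinfo().
 */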
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

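/*
 * Look up (or build) a human-readable brand string for this CPU.  Strings
 * are interned in brandname[] so callers get a stable pointer; on table
 * overflow we warn once and fall back to "Unknown".
 */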
static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

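/*
 * Fill in a cpuinfo_ia64 from the CPUID registers and PAL.  CPUID words 0-4
 * supply the vendor string, serial number, and version fields; PAL supplies
 * the implemented virtual/physical address bits (with Itanium defaults as a
 * fallback).
 */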
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;	/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

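/*
 * On ia64 the per-CPU areas themselves are set up in per_cpu_init() (called
 * from cpu_init()), so all that remains here is prefilling the possible-CPU
 * map for CPU hotplug.
 */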
void __init
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, cpu_core_map[0]);
	}
#endif

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB misses to user address space even before we have created
	 * the first user address space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

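/*
 * Apply the McKinley "erratum 9" workaround bundles now that the boot CPU
 * has been identified.
 */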
void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

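/*
 * Scan the DMI/SMBIOS tables from an initcall rather than during early
 * setup; on ia64 the table address comes from EFI.
 */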
static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);