/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file-based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added support for command line arguments
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
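/* ia64_phys_stacked_size_p8: bytes in the physical stacked register partition
 * plus 8; computed in cpu_init() below */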
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
                struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

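/* ia64_max_cacheline_size is computed by get_max_cacheline_size() in cpu_init() */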
int dma_get_cache_alignment(void)
{
        return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;      /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define I_CACHE_STRIDE_SHIFT    5       /* Safest way to go: 32 bytes by 32 bytes */
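/* initialized to all-ones so that the first stride reported by PAL in
 * get_max_cacheline_size() always replaces it */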
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
 * page-size of 2^64.
 */
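/* e.g., an I/O MMU with a 4 KiB page size would use a merge_mask of 0xfff */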
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory; it occupies the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
        unsigned long range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end) return 0;
        }
#endif
        /*
         * lowest possible address (the walker uses virtual addresses)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end   = min(end, rsvd_region[i].start);

                if (range_start < range_end)
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);

                /* nothing more available in this segment */
                if (range_end == end) return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sort; fine for the handful of reserved regions */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
        code_resource.start = ia64_tpa(_text);
        code_resource.end   = ia64_tpa(_etext) - 1;
        data_resource.start = ia64_tpa(_etext);
        data_resource.end   = ia64_tpa(_end) - 1;
        efi_initialize_iomem_resources(&code_resource, &data_resource);

        return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
        int n = 0;

        /*
         * none of the entries in this table overlap
         */
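        /* entries are added in no particular order; sort_regions() below puts
         * them in ascending order for filter_rsvd_memory() */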
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end   = (rsvd_region[n].start
                                + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;

#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

        efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
        n++;

        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end   = ~0UL;
        n++;

        num_rsvd_regions = n;

        sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end   = initrd_start + ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}

static void __init
io_port_init (void)
{
        unsigned long phys_iobase;

        /*
         * Set `iobase' based on the EFI memory map or, failing that, the
         * value firmware left in ar.k0.
         *
         * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
         * the port's virtual address, so ia32_load_state() loads it with a
         * user virtual address.  But in ia64 mode, glibc uses the
         * *physical* address in ar.k0 to mmap the appropriate area from
         * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
         * cases, user-mode can only use the legacy 0-64K I/O port space.
         *
         * ar.k0 is not involved in kernel I/O port accesses, which can use
         * any of the I/O port spaces and are done via MMIO using the
         * virtual mmio_base from the appropriate io_space[].
         */
        phys_iobase = efi_get_iobase();
        if (!phys_iobase) {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, "
                       "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

        /* setup legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
        if (!early_serial_console_init(cmdline))
                earlycons++;
#endif

        return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
        pal_logical_to_physical_t info;
        s64 status;

        status = ia64_pal_logical_to_phys(0, &info);
        if (status == -1) {
                printk(KERN_INFO "No logical to physical processor mapping "
                       "available\n");
                return;
        }
        if (status) {
                printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
                       status);
                return;
        }
        /*
         * Total number of siblings that the BSP has, though not all of
         * them may have booted successfully.  The correct number of
         * siblings booted is in info.overview_num_log.
         */
        smp_num_siblings = info.overview_tpc;
        smp_num_cpucores = info.overview_cpp;
}
#endif

void __init
setup_arch (char **cmdline_p)
{
        unw_init();

        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

        *cmdline_p = __va(ia64_boot_param->command_line);
        strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

        efi_init();
        io_port_init();

#ifdef CONFIG_IA64_GENERIC
        {
                const char *mvec_name = strstr (*cmdline_p, "machvec=");
                char str[64];

                if (mvec_name) {
                        const char *end;
                        size_t len;

                        mvec_name += 8;
                        end = strchr (mvec_name, ' ');
                        if (end)
                                len = end - mvec_name;
                        else
                                len = strlen (mvec_name);
                        len = min(len, sizeof (str) - 1);
                        strncpy (str, mvec_name, len);
                        str[len] = '\0';
                        mvec_name = str;
                } else
                        mvec_name = acpi_get_sysname();
                machvec_init(mvec_name);
        }
#endif

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

        parse_early_param();
#ifdef CONFIG_ACPI
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
        smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

        find_memory();

        /* process SAL system table: */
        ia64_sal_init(efi.sal_systab);

        ia64_setup_printk_clock();

#ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();

        cpu_set(0, cpu_sibling_map[0]);
        cpu_set(0, cpu_core_map[0]);

        check_for_logical_procs();
        if (smp_num_cpucores > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Core capable: number of cores=%d\n",
                       smp_num_cpucores);
        if (smp_num_siblings > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Threading capable: number of siblings=%d\n",
                       smp_num_siblings);
#endif

        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */

#ifdef CONFIG_ACPI
        acpi_boot_init();
#endif

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
                conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
                /*
                 * Non-legacy systems may route legacy VGA MMIO range to system
                 * memory.  vga_con probes the MMIO hole, so memory looks like
                 * a VGA device to it.  The EFI memory map can tell us if it's
                 * memory so we can avoid this problem.
                 */
                if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                        conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (!strstr(saved_command_line, "nomca"))
                ia64_mca_init();

        platform_setup(cmdline_p);
        paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#       define lpj      c->loops_per_jiffy
#       define cpunum   c->cpu
#else
#       define lpj      loops_per_jiffy
#       define cpunum   0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral" },
                { 1UL << 2, "16-byte atomic ops" }
        };
        char family[32], features[128], *cp, sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        unsigned long proc_freq;
        int i;

        mask = c->features;

        switch (c->family) {
              case 0x07:        memcpy(family, "Itanium", 8); break;
              case 0x1f:        memcpy(family, "Itanium 2", 10); break;
              default:          sprintf(family, "%u", c->family); break;
        }

        /* build the feature string: */
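        /* each feature name carries its own leading space, which is why the
         * "features" line in the seq_printf() below has no space after the ':' */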
        memcpy(features, " standard", 10);
        cp = features;
        sep = 0;
        for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
                if (mask & feature_bits[i].mask) {
                        if (sep)
                                *cp++ = sep;
                        sep = ',';
                        *cp++ = ' ';
                        strcpy(cp, feature_bits[i].feature_name);
                        cp += strlen(feature_bits[i].feature_name);
                        mask &= ~feature_bits[i].mask;
                }
        }
        if (mask) {
                /* print unknown features as a hex value: */
                if (sep)
                        *cp++ = sep;
                sprintf(cp, " 0x%lx", mask);
        }

        proc_freq = cpufreq_quick_get(cpunum);
        if (!proc_freq)
                proc_freq = c->proc_freq / 1000;

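        /* BogoMIPS = loops_per_jiffy * HZ / 500000; the second expression below
         * yields the two decimal places */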
        seq_printf(m,
                   "processor  : %d\n"
                   "vendor     : %s\n"
                   "arch       : IA-64\n"
                   "family     : %s\n"
                   "model      : %u\n"
                   "revision   : %u\n"
                   "archrev    : %u\n"
                   "features   :%s\n"   /* don't change this---it _is_ right! */
                   "cpu number : %lu\n"
                   "cpu regs   : %u\n"
                   "cpu MHz    : %lu.%06lu\n"
                   "itc MHz    : %lu.%06lu\n"
                   "BogoMIPS   : %lu.%02lu\n",
                   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   proc_freq / 1000, proc_freq % 1000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "physical id: %u\n"
                           "core id    : %u\n"
                           "thread id  : %u\n",
                           c->socket_id, c->core_id, c->thread_id);
#endif
        seq_printf(m, "\n");

        return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
                ++*pos;
#endif
        return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start =        c_start,
        .next =         c_next,
        .stop =         c_stop,
        .show =         show_cpuinfo
};

void
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number         :  8;
                        unsigned revision       :  8;
                        unsigned model          :  8;
                        unsigned family         :  8;
                        unsigned archrev        :  8;
                        unsigned reserved       : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* below default values will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable CPUs
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
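        /* mark as unimplemented every VA bit above impl_va_msb except the three
         * region bits (63..61), and every PA bit above phys_addr_size except
         * bit 63, the uncached-access bit in physical addressing mode */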
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

void
setup_per_cpu_areas (void)
{
        /* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
        unsigned long line_size, max = 1;
        unsigned int cache_size = 0;
        u64 l, levels, unique_caches;
        pal_cache_config_info_t cci;
        s64 status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __FUNCTION__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
                                                    &cci);
                if (status != 0) {
                        printk(KERN_ERR
                               "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
                               __FUNCTION__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
                if (cache_size < cci.pcci_cache_size)
                        cache_size = cci.pcci_cache_size;
                if (!cci.pcci_unified) {
                        status = ia64_pal_cache_config_info(l,
                                                            /* cache_type (instruction)= */ 1,
                                                            &cci);
                        if (status != 0) {
                                printk(KERN_ERR
                                       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
                                       __FUNCTION__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
  out:
#ifdef CONFIG_SMP
        max_cache_size = max(max_cache_size, cache_size);
#endif
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
        extern void __devinit ia64_mmu_init (void *);
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();

        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
         * physical addresses of per cpu variables with a simple:
         *      phys = ar.k3 + &per_cpu_var
         */
        ia64_set_kr(IA64_KR_PER_CPU_DATA,
                    ia64_tpa(cpu_data) - (long) __per_cpu_start);

        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

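                /* PAL_PROC_GET_FEATURES: v0 = implemented features, v1 = current
                 * settings, v2 = controllable features; enable feature bit 0x80
                 * only if it is implemented and controllable */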
                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we have created
         * the first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time.  */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
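        /* writing 1 << 16 sets the mask (m) bit of ITV, LRR0/1, PMV and CMCV */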
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_ctx to the maximum context ID that is supported by all CPUs: */
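        /* a RID is the context ID shifted left by 3, with the region number
         * encoded in the low 3 bits, so rid_size - 3 bits remain for context
         * IDs (hence the 18-RID-bit fallback of 2^15 - 1 below) */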
        if (ia64_pal_vm_summary(NULL, &vmi) == 0)
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
        else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
        platform_cpu_init();
        pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
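        /* cache_type 3 asks SAL to flush both the instruction and data caches */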
        ia64_sal_cache_flush(3);
}

void
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}