/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>

#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * This mask specifies a mask of address bits that must be 0 in order for two
 * buffers to be mergeable by the I/O MMU (i.e., the end address of the first
 * buffer and the start address of the second buffer must be aligned to
 * (merge_mask+1) in order to be mergeable).  By default, we assume there is no
 * I/O MMU which can merge physically discontiguous buffers, so we set the
 * merge_mask to ~0UL, which corresponds to an IOMMU page size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
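
/*
 * Illustrative sketch (not part of the original source): with an I/O MMU that
 * merges on 4KB page boundaries, merge_mask would be 0xfff and a mergeability
 * test would look like
 *
 *	if (((end_of_first_buf | start_of_second_buf) & ia64_max_iommu_merge_mask) == 0)
 *		... the two buffers may be merged ...
 *
 * i.e. both boundaries must be aligned to (merge_mask + 1).
 */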
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after
 * filtering.  This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}

	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;
	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}

	/* end of memory marker allows full processing inside loop body */
	return 0;
}
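
/*
 * Worked example for filter_rsvd_memory() (illustrative): if the only reserved
 * region inside an incoming segment [S, E) is [R0, R1), the loop above hands
 * the two remaining pieces to the callback as func(__pa(S), R0 - S) and
 * func(__pa(R1), E - R1); the ~0UL end-of-memory marker guarantees that the
 * tail piece is emitted from inside the loop body.
 */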
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}
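
/*
 * Illustrative note: after reserve_memory() the table holds, in ascending
 * start order, the boot parameter block, the EFI memory map, the command
 * line, the kernel image, the initrd (if configured and present), and the
 * ~0UL end-of-memory marker occupying the extra (+1) slot of rsvd_region[].
 */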
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}
static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0.  This should become more clear in future SAL
	 * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
	}
	printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			return 0;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		return 0;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		return 0;
#endif

	return -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
	return 0;
}
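
/*
 * Note on the BogoMIPS arithmetic in show_cpuinfo() above (explanatory, not
 * in the original source): loops_per_jiffy*HZ is the number of delay-loop
 * iterations per second and each iteration is counted as two
 * bogo-instructions, hence MIPS = lpj*HZ*2/1000000 = lpj*HZ/500000;
 * (lpj*HZ/5000) % 100 supplies the two fractional digits.
 */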
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
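
/*
 * Worked example (illustrative): with the Itanium defaults above
 * (impl_va_msb = 50, phys_addr_size = 44),
 *
 *	unimpl_va_mask = ~((7L<<61) | ((1L<<51) - 1)) = 0x1ff8000000000000
 *	unimpl_pa_mask = ~((1L<<63) | ((1L<<44) - 1)) = 0x7ffff00000000000
 *
 * i.e. virtual bits 51..60 are unimplemented (bits 61-63 select the region)
 * and physical bits 44..62 are unimplemented (bit 63 is the UC attribute).
 */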
void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *   phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);
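
	/*
	 * Illustrative consequence of the assignment above (not in the
	 * original source): since ar.k3 holds the per-CPU area's physical
	 * base minus __per_cpu_start, the physical address of any per-CPU
	 * variable can be computed without a page-table walk as
	 *
	 *	phys = ia64_get_kr(IA64_KR_PER_CPU_DATA) + (long) &per_cpu_var;
	 *
	 * which is what the MCA assembly code relies on.
	 */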
	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer all speculative faults.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
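
	/*
	 * Arithmetic note (explanatory, not in the original source): the low
	 * 3 bits of a region ID encode the virtual region number, so rid_size
	 * implemented RID bits leave rid_size - 3 bits for the context number;
	 * e.g. rid_size = 24 gives max_ctx = (1U << 21) - 1, and the
	 * architected minimum of 18 RID bits yields the (1U << 15) - 1
	 * fallback above.
	 */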
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}
	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
}
void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}