/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;
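/*
 * Both stride shifts start out as ~0 ("unknown", larger than any real
 * stride) so that get_cache_info() below can simply take the minimum of
 * the strides reported by PAL for each cache level.
 */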
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * I/O MMU page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}
/*
 * Merge overlapping entries in the (already sorted) reserved-region table;
 * returns the new number of valid entries.
 */
static int __init
merge_regions (struct rsvd_region *rsvd_region, int max)
{
	int i;
	for (i = 1; i < max; ++i) {
		if (rsvd_region[i].start >= rsvd_region[i-1].end)
			continue;
		if (rsvd_region[i].end > rsvd_region[i-1].end)
			rsvd_region[i-1].end = rsvd_region[i].end;
		--max;
		memmove(&rsvd_region[i], &rsvd_region[i+1],
			(max - i) * sizeof(struct rsvd_region));
	}
	return max;
}
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
			&bss_resource);

	return 0;
}

__initcall(register_memory);

#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour.  Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c.  The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case.  See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}
static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			*n = merge_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
}
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}
static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);
#endif
}
static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);
#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	acpi_numa_fixup();
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#  endif
	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
		32 : cpumask_weight(&early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_ACPI */

#ifdef CONFIG_SMP
	smp_build_cpu_map();
#endif
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();

	clear_sched_clock_stable();
}
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n",
		   cpumask_weight(&cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char *
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}
static void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void ia64_mmu_init(void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
		cpumask_set_cpu(0, &cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *   phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed.  head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
}
void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}
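/*
 * DMI scanning needs the EFI tables and a working ioremap(), which is
 * likely why it is deferred to an initcall here rather than being done
 * directly in setup_arch().
 */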
static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();
	return 0;
}
core_initcall(run_dmi_scan);