// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/uv/uv.h>
#include <asm/xtp.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

char ia64_platform_name[64];

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
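
/*
 * Rough sketch of how this array is consumed (for orientation only; the
 * authoritative macros live in the generic percpu headers):
 *
 *	per_cpu(var, cpu)  ~~>  *(&var + __per_cpu_offset[cpu])
 *
 * i.e. each CPU's instance of a per-CPU variable sits at the variable's
 * .data..percpu link-time address shifted by that CPU's offset.
 */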

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;

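/*
 * Sketch of how the stride is consumed (the real loop is assembly in
 * arch/ia64/kernel/flush.S; this pseudo-C is only illustrative):
 *
 *	for (addr = start; addr < end; addr += 1UL << ia64_i_cache_stride_shift)
 *		fc.i addr;	// flush line, make i-cache coherent with d-cache
 *
 * A stride that is too small merely costs extra iterations; one that is
 * too large would skip lines, hence the conservative 32-byte fallback.
 */
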
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
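
/*
 * Typical use (sketch): the EFI memory-map walker feeds each usable
 * segment through this filter, which forwards only the unreserved
 * pieces to the real consumer, e.g.
 *
 *	efi_memmap_walk(filter_rsvd_memory, some_registration_func);
 *
 * ("some_registration_func" is a placeholder; the actual callbacks
 * live in arch/ia64/mm/contig.c and discontig.c.)
 */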

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}
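
/*
 * The bubble sort above is O(max^2), but max is bounded by
 * IA64_MAX_RSVD_REGIONS + 1, a small compile-time constant, and this
 * runs exactly once at early boot.
 */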

/* merge overlaps */
static int __init
merge_regions (struct rsvd_region *rsvd_region, int max)
{
	int i;
	for (i = 1; i < max; ++i) {
		if (rsvd_region[i].start >= rsvd_region[i-1].end)
			continue;
		if (rsvd_region[i].end > rsvd_region[i-1].end)
			rsvd_region[i-1].end = rsvd_region[i].end;
		--max;
		memmove(&rsvd_region[i], &rsvd_region[i+1],
			(max - i) * sizeof(struct rsvd_region));
	}
	return max;
}
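
/*
 * Example (hypothetical values): given the sorted table
 *	{ [0x1000,0x3000), [0x2000,0x4000), [0x8000,0x9000) }
 * the first two entries overlap, so they collapse into [0x1000,0x4000)
 * and merge_regions() returns the new count, 2.
 */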

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
			&bss_resource);

	return 0;
}

__initcall(register_memory);
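
/*
 * Once registered, these resources appear under /proc/iomem as
 * "Kernel code", "Kernel data" and "Kernel bss", nested inside the
 * System RAM ranges created by efi_initialize_iomem_resources().
 */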


#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * kernel/dma/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "uv".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (is_uv_system())
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			*n = merge_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warn("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

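/*
 * The parse_crashkernel() call above consumes the usual
 * "crashkernel=size[@offset]" boot parameter: e.g. "crashkernel=256M"
 * lets kdump_find_rsvd_region() pick a base, while "crashkernel=256M@1G"
 * pins the reservation (values are illustrative only).
 */
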
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);

	/* reserve all regions except the end of memory marker with memblock */
	for (n = 0; n < num_rsvd_regions - 1; n++) {
		struct rsvd_region *region = &rsvd_region[n];
		phys_addr_t addr = __pa(region->start);
		phys_addr_t size = region->end - region->start;

		memblock_reserve(addr, size);
	}
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
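
/*
 * Sketch of how a kernel inb(port) then reaches memory (simplified from
 * the macros in arch/ia64/include/asm/io.h; treat the details below as
 * illustrative, not authoritative):
 *
 *	space = IO_SPACE_NR(port);	// upper port bits pick io_space[]
 *	off   = io_space[space].sparse
 *		? ((port >> 2) << 12) | (port & 0xfff)	// sparse encoding
 *		: port;
 *	addr  = io_space[space].mmio_base | off;
 *
 * so port I/O turns into uncacheable MMIO loads and stores at "addr".
 */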

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		return 0;
#endif
	return -1;
}

static void __init
screen_info_setup(void)
{
	unsigned int orig_x, orig_y, num_cols, num_rows, font_height;

	memset(&screen_info, 0, sizeof(screen_info));

	if (!ia64_boot_param->console_info.num_rows ||
	    !ia64_boot_param->console_info.num_cols) {
		printk(KERN_WARNING "invalid screen-info, guessing 80x25\n");
		orig_x = 0;
		orig_y = 0;
		num_cols = 80;
		num_rows = 25;
		font_height = 16;
	} else {
		orig_x = ia64_boot_param->console_info.orig_x;
		orig_y = ia64_boot_param->console_info.orig_y;
		num_cols = ia64_boot_param->console_info.num_cols;
		num_rows = ia64_boot_param->console_info.num_rows;
		font_height = 400 / num_rows;
	}

	screen_info.orig_x = orig_x;
	screen_info.orig_y = orig_y;
	screen_info.orig_video_cols  = num_cols;
	screen_info.orig_video_lines = num_rows;
	screen_info.orig_video_points = font_height;
	screen_info.orig_video_mode = 3;	/* XXX fake */
	screen_info.orig_video_isVGA = 1;	/* XXX fake */
	screen_info.orig_video_ega_bx = 3;	/* XXX fake */
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	uv_probe_system_type();
	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	acpi_numa_fixup();
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
		32 : cpumask_weight(&early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
#endif /* CONFIG_ACPI_NUMA */

#ifdef CONFIG_SMP
	smp_build_cpu_map();
#endif
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(sal_systab_phys));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	/*
	 * Default to /dev/sda2.  This assumes that the EFI partition
	 * is physical disk 1 partition 1 and the Linux root disk is
	 * physical disk 1 partition 2.
	 */
	ROOT_DEV = Root_SDA2;	/* default to second partition on first drive */

	if (is_uv_system())
		uv_setup(cmdline_p);
#ifdef CONFIG_SMP
	else
		init_smp_config();
#endif

	screen_info_setup();
	paging_init();

	clear_sched_clock_stable();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name),
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
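
	/*
	 * The BogoMIPS arithmetic above: BogoMIPS = lpj * HZ * 2 / 10^6,
	 * so "lpj*HZ/500000" is the integer part and "(lpj*HZ/5000) % 100"
	 * the two decimals.  E.g. lpj = 2500000 at HZ = 250 (illustrative
	 * numbers only) prints "1250.00".
	 */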
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n",
		   cpumask_weight(&cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m,"\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char *
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

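/*
 * Worked example for the masks above, using the Itanium defaults: with
 * impl_va_msb = 50, unimpl_va_mask has bits 51..60 set, i.e. a virtual
 * address is unimplemented if any of those bits is set (bits 61..63
 * select the region, bits 0..50 are implemented).  Likewise, with
 * phys_addr_size = 44, unimpl_pa_mask flags bits 44..62 (bit 63 is the
 * uncacheable-access bit in physical addressing).
 */
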
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for flush_icache_range() */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void ia64_mmu_init(void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
		cpumask_set_cpu(0, &cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *	phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed. head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
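
	/*
	 * Secondary CPUs may run cpu_init() concurrently, so lower the
	 * system-wide maximum with a cmpxchg() retry loop: we are done as
	 * soon as our value is installed or another CPU has already
	 * installed one at least as small.
	 */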
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_setup();
	return 0;
}
core_initcall(run_dmi_scan);