/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
#include <asm/nmi.h>

/* The BMC sets a bit in this MMR before sending an NMI */
#define UVH_NMI_MMR		UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR	(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK	(1UL << 63)
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;

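/*
 * Read a local MMR before the permanent MMR mappings are set up:
 * temporarily map the register with early_ioremap(), read it, and
 * tear the mapping down again.
 */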
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
        unsigned long val, *mmr;

        mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
        val = *mmr;
        early_iounmap(mmr, sizeof(*mmr));
        return val;
}

static inline bool is_GRU_range(u64 start, u64 end)
{
        return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
        return is_ISA_range(start, end) || is_GRU_range(start, end);
}

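/*
 * Derive this node's physical node id (pnode) from the NODE_ID and
 * GAM_CONFIG MMRs, recording the minimum hub revision id on the way.
 */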
static int __init early_get_pnodeid(void)
{
        union uvh_node_id_u node_id;
        union uvh_rh_gam_config_mmr_u m_n_config;
        int pnode;

        /* Currently, all blades have the same revision number */
        node_id.v = uv_early_read_mmr(UVH_NODE_ID);
        m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
        uv_min_hub_revision_id = node_id.s.revision;

        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;

        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
        return pnode;
}

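/*
 * Learn from the APICID MMR how far the pnode is shifted within an
 * APIC id; an old BIOS leaves the MMR zero, so fall back to the
 * default shift.
 */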
static void __init early_get_apic_pnode_shift(void)
{
        uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
        if (!uvh_apicid.v)
                /*
                 * Old BIOS; use the default value.
                 */
                uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}

/*
 * Add an extra bit, as dictated by the BIOS, to the destination apicid of
 * interrupts potentially passing through the UV HUB.  This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
        union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

        if (is_uv1_hub()) {
                apicid_mask.v =
                        uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
                uv_apicid_hibits =
                        apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
        }
}

static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        int pnodeid, is_uv1, is_uv2;

        is_uv1 = !strcmp(oem_id, "SGI");
        is_uv2 = !strcmp(oem_id, "SGI2");
        if (is_uv1 || is_uv2) {
                uv_hub_info->hub_revision =
                        is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
                pnodeid = early_get_pnodeid();
                early_get_apic_pnode_shift();
                x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
                x86_platform.nmi_init = uv_nmi_init;
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
                        __this_cpu_write(x2apic_extra_bits,
                                pnodeid << uvh_apicid.s.pnode_shift);
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        uv_set_apicid_hibit();
                        return 1;
                }
        }
        return 0;
}

enum uv_system_type get_uv_system_type(void)
{
        return uv_system_type;
}

int is_uv_system(void)
{
        return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

static const struct cpumask *uv_target_cpus(void)
{
        return cpu_online_mask;
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

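/*
 * Wake a secondary cpu by writing INIT and then STARTUP messages to the
 * hub's IPI_INT MMR rather than through the local APIC ICR.  The vector
 * field carries the physical page number of start_rip, hence the shift
 * by 12.
 */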
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        phys_apicid |= uv_apicid_hibits;
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        mdelay(10);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        atomic_set(&init_deasserted, 1);
#endif
        return 0;
}

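/* Deliver one interrupt vector to a single cpu through its hub. */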
static void uv_send_IPI_one(int cpu, int vector)
{
        unsigned long apicid;
        int pnode;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        pnode = uv_apicid_to_pnode(apicid);
        uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_allbutself(int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_all(int vector)
{
        uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
        return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
        else
                return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                          const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}

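/*
 * On "non-unique APIC" (UVH) systems the hardware APIC id lacks the
 * pnode bits, so OR in this cpu's x2apic_extra_bits to form the full id.
 */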
static unsigned int x2apic_get_apic_id(unsigned long x)
{
        unsigned int id;

        WARN_ON(preemptible() && num_online_cpus() > 1);
        id = x | __this_cpu_read(x2apic_extra_bits);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        /* mask out x2apic_extra_bits? */
        x = id;
        return x;
}

static unsigned int uv_read_apic_id(void)
{
        return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
        return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
        return apic == &apic_x2apic_uv_x;
}

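/*
 * The UV apic driver, registered at the bottom of this file via
 * apic_driver().  It is selected when uv_acpi_madt_oem_check() above
 * returns 1, i.e. for the "UVH" oem_table_id.
 */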
static struct apic __refdata apic_x2apic_uv_x = {

        .name                           = "UV large system",
        .probe                          = uv_probe,
        .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
        .apic_id_registered             = uv_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = uv_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = uv_vector_allocation_domain,
        .init_apic_ldr                  = uv_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = uv_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = uv_send_IPI_mask,
        .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = uv_send_IPI_allbutself,
        .send_IPI_all                   = uv_send_IPI_all,
        .send_IPI_self                  = uv_send_IPI_self,

        .wakeup_secondary_cpu           = uv_wakeup_secondary,
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
        __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
        int blade;

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                if (pnode == uv_blade_info[blade].pnode)
                        return blade;
        BUG();
}

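/*
 * Each redirect/alias MMR pair describes a low-memory range that the hub
 * redirects to another physical address; get_lowmem_redirect() below scans
 * the pairs for the enabled one whose alias region starts at address 0.
 */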
struct redir_addr {
        unsigned long redirect;
        unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
        union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;

        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
                if (alias.s.enable && alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
        *base = *size = 0;
}

enum map_type {map_wb, map_uc};

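/*
 * Create a kernel mapping for a hub address range: the base comes from an
 * overlay MMR field (shifted by pshift into a physical address) and the
 * size is one 1UL << bshift block per pnode.  GRU space is mapped
 * write-back, MMR/MMIOH space uncached.
 */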
static __init void map_high(char *id, unsigned long base, int pshift,
                            int bshift, int max_pnode, enum map_type map_type)
{
        unsigned long bytes, paddr;

        paddr = base << pshift;
        bytes = (1UL << bshift) * (max_pnode + 1);
        printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
               paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (gru.s.enable) {
                map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
                gru_start_paddr = ((u64)gru.s.base << shift);
                gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
        }
}

static __init void map_mmr_high(int max_pnode)
{
        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        int shift;

        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        if (is_uv1_hub() && mmioh.s1.enable) {
                shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
                         max_pnode, map_uc);
        }
        if (is_uv2_hub() && mmioh.s2.enable) {
                shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
                         max_pnode, map_uc);
        }
}

static __init void map_low_mmrs(void)
{
        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

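/* Query the BIOS for the real-time clock frequency, falling back to a guess. */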
static __init void uv_rtc_init(void)
{
        long status;
        u64 ticks_per_sec;

        status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                   &ticks_per_sec);
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
                printk(KERN_WARNING
                       "unable to determine platform RTC clock frequency, guessing.\n");
                /* BIOS gives the wrong value for the clock frequency, so guess */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
        struct timer_list *timer = &uv_hub_info->scir.timer;
        unsigned char bits = uv_hub_info->scir.state;

        /* flip heartbeat bit */
        bits ^= SCIR_CPU_HEARTBEAT;

        /* is this cpu idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;

        /* update system controller interface reg */
        uv_set_scir_bits(bits);

        /* enable next timer period */
        mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
        while (!uv_cpu_hub_info(cpu)->scir.enabled) {
                struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
                setup_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_hub_info(cpu)->scir.enabled = 1;

                /* also ensure that boot cpu is enabled */
                cpu = 0;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
        if (uv_cpu_hub_info(cpu)->scir.enabled) {
                uv_cpu_hub_info(cpu)->scir.enabled = 0;
                del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                uv_heartbeat_enable(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uv_heartbeat_disable(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
        hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
        int cpu;

        if (is_uv_system())
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
        return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
                     unsigned int command_bits, u32 flags)
{
        int domain, bus, rc;

        PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
                 pdev->devfn, decode, command_bits, flags);

        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                return 0;

        if ((command_bits & PCI_COMMAND_IO) == 0)
                return 0;

        domain = pci_domain_nr(pdev->bus);
        bus = pdev->bus->number;

        rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
        PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

        return rc;
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
        /* CPU 0 initialization will be done via uv_system_init. */
        if (!uv_blade_info)
                return;

        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
                set_x2apic_extra_bits(uv_hub_info->pnode);
}

/*
 * When an NMI is received, print a stack trace.
 */
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
        unsigned long real_uv_nmi;
        int bid;

        if (reason != DIE_NMIUNKNOWN)
                return NOTIFY_OK;

        if (in_crash_kexec)
                /* do nothing if entering the crash kernel */
                return NOTIFY_OK;

        /*
         * Each blade has an MMR that indicates when an NMI has been sent
         * to cpus on the blade.  If an NMI is detected, atomically
         * clear the MMR and update a per-blade NMI count used to
         * cause each cpu on the blade to notice a new NMI.
         */
        bid = uv_numa_blade_id();
        real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

        if (unlikely(real_uv_nmi)) {
                spin_lock(&uv_blade_info[bid].nmi_lock);
                real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
                if (real_uv_nmi) {
                        uv_blade_info[bid].nmi_count++;
                        uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
                }
                spin_unlock(&uv_blade_info[bid].nmi_lock);
        }

        if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
                return NOTIFY_DONE;

        __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

        /*
         * Use a lock so only one cpu prints at a time.
         * This prevents intermixed output.
         */
        spin_lock(&uv_nmi_lock);
        pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
        dump_stack();
        spin_unlock(&uv_nmi_lock);

        return NOTIFY_STOP;
}

static struct notifier_block uv_dump_stack_nmi_nb = {
        .notifier_call  = uv_handle_nmi,
        .priority       = NMI_LOCAL_LOW_PRIOR - 1,
};

void uv_register_nmi_notifier(void)
{
        if (register_die_notifier(&uv_dump_stack_nmi_nb))
                printk(KERN_WARNING "UV NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
        unsigned int value;

        /*
         * Unmask NMI on all cpus
         */
        value = apic_read(APIC_LVT1) | APIC_DM_NMI;
        value &= ~APIC_LVT_MASKED;
        apic_write(APIC_LVT1, value);
}

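/*
 * One-time boot initialization of the UV topology: read the hub config
 * MMRs, allocate the blade/node/cpu translation tables, fill in each
 * cpu's uv_hub_info, map the high GRU/MMR/MMIOH ranges and hook up the
 * SCIR heartbeat, NMI and legacy VGA handlers.
 */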
void __init uv_system_init(void)
{
        union uvh_rh_gam_config_mmr_u m_n_config;
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
        int gnode_extra, max_pnode = 0;
        unsigned long mmr_base, present, paddr;
        unsigned short pnode_mask, pnode_io_mask;

        printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
        map_low_mmrs();

        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;
        pnode_mask = (1 << n_val) - 1;
        pnode_io_mask = (1 << n_io) - 1;

        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
        gnode_upper = ((unsigned long)gnode_extra << m_val);
        printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
               n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);

        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kzalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                uv_blade_info[blade].memory_nid = -1;

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_node_to_blade);
        memset(uv_node_to_blade, 255, bytes);

        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_cpu_to_blade);
        memset(uv_cpu_to_blade, 255, bytes);

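        /*
         * Walk the NODE_PRESENT bitmaps (64 pnodes per MMR word) and set
         * up a blade structure for every pnode that is present.
         */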
        blade = 0;
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
                present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
                        pnode = (i * 64 + j) & pnode_mask;
                        uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        spin_lock_init(&uv_blade_info[blade].nmi_lock);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }

        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
                            &sn_region_size, &system_serial_number);
        uv_rtc_init();

        for_each_present_cpu(cpu) {
                int apicid = per_cpu(x86_cpu_to_apicid, cpu);

                nid = cpu_to_node(cpu);
                /*
                 * apic_pnode_shift must be set before calling uv_apicid_to_pnode().
                 */
                uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
                uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
                uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

                pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

                /* Record any memory node on the blade; stays -1 if it has no cpus. */
                uv_blade_info[blade].memory_nid = nid;

                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = n_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
                uv_cpu_hub_info(cpu)->pnode = pnode;
                uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
        }

        /* Add blade/pnode info for nodes without cpus */
        for_each_online_node(nid) {
                if (uv_node_to_blade[nid] >= 0)
                        continue;
                paddr = node_start_pfn(nid) << PAGE_SHIFT;
                paddr = uv_soc_phys_ram_to_gpa(paddr);
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
        }

        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
        map_mmioh_high(max_pnode & pnode_io_mask);

        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        uv_register_nmi_notifier();
        proc_mkdir("sgi_uv", NULL);

        /* register Legacy VGA I/O redirection handler */
        pci_register_set_vga_state(uv_set_vga_state);

        /*
         * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
         * EFI is not enabled in the kdump kernel.
         */
        if (is_kdump_kernel())
                reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);