// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI NMI support routines
 *
 * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/clocksource.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived in the NMI handler. If some CPUs do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible, as they
 * disrupt the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain. This reduces the number of false NMI calls when the perf
 * tools are running, which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
 * very short as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal mentioned above, and does not read the UV Hub's MMR.
 */
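/*
 * Rough flow (see uv_handle_nmi() below): the first CPU to observe the
 * NMI becomes the "master" and the rest are "slaves".  The master waits
 * for all CPUs in uv_nmi_wait(), pings stragglers with IPI(NMI), carries
 * out the selected NMI action, and then clears the per-hub and global
 * "in_nmi" state.
 */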

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))

static u64 *pch_base;
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2

/*
 * Default is all stack dumps go to the console and buffer.
 * Lower the loglevel to send output to the log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)

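/*
 * module_param_named(..., local64, ...) below pastes the "local64" type
 * token into param_ops_local64/param_check_local64, wiring the handlers
 * above to these counters.  A usage sketch, assuming this file is built
 * in so its parameters appear under /sys/module/uv_nmi/parameters:
 *
 *	cat /sys/module/uv_nmi/parameters/nmi_count	(read a counter)
 *	echo 0 > /sys/module/uv_nmi/parameters/nmi_count (any write clears it)
 */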
static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);

/*
 * The following values allow tuning for large systems under heavy load.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);

static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);

static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);

#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)

/* Valid NMI Actions */
#define	ACTION_LEN	16
static struct nmi_action {
	char	*action;
	char	*desc;
} valid_acts[] = {
	{	"kdump",	"do kernel crash dump" },
	{	"dump",		"dump process stack for each cpu" },
	{	"ips",		"dump Inst Ptr info for each cpu" },
	{	"kdb",		"enter KDB (needs kgdboc= assignment)" },
	{	"kgdb",		"enter KGDB (needs gdb target remote)" },
	{	"health",	"check if CPUs respond to NMI" },
};
typedef char action_t[ACTION_LEN];
static action_t uv_nmi_action = { "dump" };

static int param_get_action(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", uv_nmi_action);
}

static int param_set_action(const char *val, const struct kernel_param *kp)
{
	int i;
	int n = ARRAY_SIZE(valid_acts);
	char arg[ACTION_LEN], *p;

	/* (remove possible '\n') */
	strncpy(arg, val, ACTION_LEN - 1);
	arg[ACTION_LEN - 1] = '\0';
	p = strchr(arg, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < n; i++)
		if (!strcmp(arg, valid_acts[i].action))
			break;

	if (i < n) {
		strcpy(uv_nmi_action, arg);
		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
		return 0;
	}

	pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
	for (i = 0; i < n; i++)
		pr_err("UV: %-8s - %s\n",
			valid_acts[i].action, valid_acts[i].desc);
	return -EINVAL;
}

static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, action_t)

module_param_named(action, uv_nmi_action, action, 0644);
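/*
 * For example, to switch the action at runtime (path assumes the standard
 * built-in parameter layout under /sys/module/uv_nmi):
 *
 *	echo kdb > /sys/module/uv_nmi/parameters/action
 */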

static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}

/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
					1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}

/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}

/*
 * UV hubless NMI handler functions
 */
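/*
 * Bit 7 of CMOS/RTC index port 0x70 gates NMI delivery: writing 0x8f
 * masks NMIs and writing 0x0f unmasks them again, which causes a
 * still-pending NMI to be re-asserted.
 */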
static inline void uv_reassert_nmi(void)
{
	/* (from arch/x86/include/asm/mach_traps.h) */
	outb(0x8f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
	outb(0x0f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
}

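/*
 * Read-modify-write a PCH GPIO register: a nonzero mask selects the bits
 * to be replaced by "data"; a zero mask treats "data" as a
 * write-1-to-clear status bit and writes it back only when it is set.
 */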
static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);

	if (mask) {			/* OR in new data */
		int writed = (readd & ~mask) | data;

		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			addr, readd, ~mask, data, writed);
		writel(writed, addr);
	} else if (readd & data) {	/* clear status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}

	(void)readl(addr);		/* flush write data */
}

static void uv_nmi_setup_hubless_intr(void)
{
	uv_pch_intr_now_enabled = uv_pch_intr_enable;

	uv_init_hubless_pch_io(
		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);

	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
		uv_pch_intr_now_enabled ? "enabled" : "disabled");
}

static struct init_nmi {
	unsigned int offset;
	unsigned int mask;
	unsigned int data;
} init_nmi[] = {
	{	/* HOSTSW_OWN_GPP_D_0 */
		.offset = 0x84,
		.mask = 0x1,
		.data = 0x0,	/* ACPI Mode */
	},

/* Clear status: */
	{	/* GPI_INT_STS_GPP_D_0 */
		.offset = 0x104,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_GPE_STS_GPP_D_0 */
		.offset = 0x124,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_SMI_STS_GPP_D_0 */
		.offset = 0x144,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_NMI_STS_GPP_D_0 */
		.offset = 0x164,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},

/* Disable interrupts: */
	{	/* GPI_INT_EN_GPP_D_0 */
		.offset = 0x114,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_GPE_EN_GPP_D_0 */
		.offset = 0x134,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_SMI_EN_GPP_D_0 */
		.offset = 0x154,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_NMI_EN_GPP_D_0 */
		.offset = 0x174,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},

/* Setup GPP_D_0 Pad Config: */
	{	/* PAD_CFG_DW0_GPP_D_0 */
		.offset = 0x4c0,
		.mask = 0xffffffff,
		.data = 0x82020100,
/*
 * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default)
 *
 * 29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
 *                                             from RX buffer (default)
 *
 * 28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
 *
 * 26:25 RX Level/Edge Configuration (RXEVCFG):
 *       = 0h # Level
 *       = 1h # Edge
 *
 * 23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
 *
 * 20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
 *       = 0 # Routing does not cause peripheral IRQ...
 *           # (we want an NMI not an IRQ)
 *
 * 19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
 * 18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
 * 17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
 *
 * 11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
 *  9    GPIO RX Disable (GPIORXDIS):
 *       = 0 # Enable the input buffer (active low enable)
 *
 *  8    GPIO TX Disable (GPIOTXDIS):
 *       = 1 # Disable the output buffer; i.e. Hi-Z
 *
 *  1    GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.
 *  0    GPIO TX State (GPIOTXSTATE):
 *       = 0 # (Leave at default)
 */
	},

/* Pad Config DW1 */
	{	/* PAD_CFG_DW1_GPP_D_0 */
		.offset = 0x4c4,
		.mask = 0x3c00,
		.data = 0,	/* Termination = none (default) */
	},
};

static void uv_init_hubless_pch_d0(void)
{
	int i, read;

	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
	if (read != 0) {
		pr_info("UV: Hubless NMI already configured\n");
		return;
	}

	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
		uv_init_hubless_pch_io(init_nmi[i].offset,
					init_nmi[i].mask,
					init_nmi[i].data);
	}
}

static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{
	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
	int status = *pstat;

	hub_nmi->nmi_value = status;
	atomic_inc(&hub_nmi->read_mmr_count);

	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
		return 0;

	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
	(void)*pstat;			/* Flush write */

	return 1;
}

static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}

/*
 * If this is the first CPU in on this hub, set hub_nmi "in_nmi" and "owner"
 * values and return true. If it is also the first CPU in on the system, set
 * the global "in_nmi" flag.
 */
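/*
 * atomic_add_unless(&v, 1, 1) increments only when the count is not
 * already 1 and returns nonzero iff it incremented, so exactly one CPU
 * per hub (and one CPU system-wide) wins ownership without extra locking.
 */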
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}

/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;

	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);

			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;

			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {

			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/*
		 * Check if this BMC missed setting the MMR NMI flag, or if
		 * this is a UV hubless system where only the PCH owner can
		 * check the flag.
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);

	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}

/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}

/* Ping non-responding CPUs, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for CPUs that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}

/* Loop waiting as CPUs enter the NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPUs coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}

/* Wait until all slave CPUs have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPUs to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPUs are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}

/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}

/*
 * Dump this CPU's state. If the action was set to "kdump" and crash_kexec
 * failed, then we provide "dump" as an alternate action. Action "dump" now
 * also includes the show "ips" (instruction pointers) action, whereas the
 * action "ips" only displays instruction pointers for the non-idle CPUs.
 * This is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();

	if (current->pid != 0 || !uv_nmi_action_is("ips"))
		uv_nmi_dump_cpu_ip(cpu, regs);

	if (uv_nmi_action_is("dump")) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}

	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}

/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}

/* Wait until all CPUs are ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}

/* Current "health" check is to check which CPUs are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}

/* Walk through CPU list and dump the state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}

static atomic_t uv_nmi_kexec_failed;

#if defined(CONFIG_KEXEC_CORE)
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	/* Call crash to dump system state */
	if (master) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned, ");
		atomic_set(&uv_nmi_kexec_failed, 1);
		if (!kexec_crash_image) {
			pr_cont("crash kernel not loaded\n");
			return;
		}
		pr_cont("kexec busy, stalling cpus while waiting\n");
	}

	/* If crash_kexec fails, the slaves should return; otherwise stall */
	while (atomic_read(&uv_nmi_kexec_failed) == 0)
		mdelay(10);
}

#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
	atomic_set(&uv_nmi_kexec_failed, 1);
}
#endif /* !CONFIG_KEXEC_CORE */

#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action_is("kgdb"))
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */

/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used. See the KGDB documentation for further
 * information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				&uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */

/*
 * UV NMI handler
 */
static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump")) {
		uv_nmi_kdump(cpu, master, regs);

		/* Unexpected return, revert action to "dump" */
		if (master)
			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
	}

	/* Pause as all CPUs enter the NMI handler */
	uv_nmi_wait(master);

	/* Process actions other than "kdump": */
	if (uv_nmi_action_is("health")) {
		uv_nmi_action_health(cpu, regs, master);
	} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
		uv_nmi_dump_state(cpu, regs, master);
	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
		uv_call_kgdb_kdb(cpu, regs, master);
	} else {
		if (master)
			pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
		uv_nmi_sync_exit(master);
	}

	/* Clear per_cpu "in_nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}

/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}

static void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all CPUs
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

/* Setup HUB NMI info */
static void __init uv_nmi_setup_common(bool hubbed)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu;

	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
			uv_hub_nmi_list[nid]->hub_present = hubbed;
			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}

/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}

/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		pch_base, PCH_PCR_GPIO_1_BASE);
	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();
	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();
	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: Hubless NMI enabled\n");
}