/*
 * SGI NMI support routines
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clocksource.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived into the nmi handler.  If some CPUs do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running, which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))

static u64 *pch_base;
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2

/*
 * Default is all stack dumps go to the console and buffer.
 * Lower level to send to log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)

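/*
 * The "local64" get/set ops above let module_param_named() expose a
 * local64_t counter directly: reading one of the parameters below reports
 * the current count, and writing any value resets it to zero.
 */
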
static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);

/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);

static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);

static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);

#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)

/* Valid NMI Actions */
#define ACTION_LEN	16
static struct nmi_action {
	char	*action;
	char	*desc;
} valid_acts[] = {
	{	"kdump",	"do kernel crash dump" },
	{	"dump",		"dump process stack for each cpu" },
	{	"ips",		"dump Inst Ptr info for each cpu" },
	{	"kdb",		"enter KDB (needs kgdboc= assignment)" },
	{	"kgdb",		"enter KGDB (needs gdb target remote)" },
	{	"health",	"check if CPUs respond to NMI" },
};
typedef char action_t[ACTION_LEN];
static action_t uv_nmi_action = { "dump" };

static int param_get_action(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", uv_nmi_action);
}

static int param_set_action(const char *val, const struct kernel_param *kp)
{
	int i;
	int n = ARRAY_SIZE(valid_acts);
	char arg[ACTION_LEN], *p;

	/* (remove possible '\n') */
	strncpy(arg, val, ACTION_LEN - 1);
	arg[ACTION_LEN - 1] = '\0';
	p = strchr(arg, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < n; i++)
		if (!strcmp(arg, valid_acts[i].action))
			break;

	if (i < n) {
		strcpy(uv_nmi_action, arg);
		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
		return 0;
	}

	pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
	for (i = 0; i < n; i++)
		pr_err("UV: %-8s - %s\n",
			valid_acts[i].action, valid_acts[i].desc);
	return -EINVAL;
}

static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, action_t)

module_param_named(action, uv_nmi_action, action, 0644);

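/*
 * Note: uv_nmi_action_is() below is a prefix match; it returns true
 * whenever the current uv_nmi_action string begins with "action".
 */
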
static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}

/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
					1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}

/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}

/*
 * UV hubless NMI handler functions
 */

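/*
 * Bit 7 of the CMOS index port (0x70) masks NMI on PC-compatible
 * chipsets: writing 0x8f briefly masks NMIs and 0x0f unmasks them again,
 * re-arming the edge-triggered NMI input.  The dummy reads of port 0x71
 * complete each CMOS access cycle.
 */
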
static inline void uv_reassert_nmi(void)
{
	/* (from arch/x86/include/asm/mach_traps.h) */
	outb(0x8f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT); /* dummy read */
	outb(0x0f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT); /* dummy read */
}

static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);

	if (mask) {			/* OR in new data */
		int writed = (readd & ~mask) | data;

		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			addr, readd, ~mask, data, writed);
		writel(writed, addr);
	} else if (readd & data) {	/* clear status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}

	(void)readl(addr);		/* flush write data */
}

static void uv_nmi_setup_hubless_intr(void)
{
	uv_pch_intr_now_enabled = uv_pch_intr_enable;

	uv_init_hubless_pch_io(
		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);

	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
		uv_pch_intr_now_enabled ? "enabled" : "disabled");
}

static struct init_nmi {
	unsigned int	offset;
	unsigned int	mask;
	unsigned int	data;
} init_nmi[] = {
	{	/* HOSTSW_OWN_GPP_D_0 */
		.data = 0x0,	/* ACPI Mode */
	},

/* Clear status: */
	{	/* GPI_INT_STS_GPP_D_0 */
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_GPE_STS_GPP_D_0 */
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_SMI_STS_GPP_D_0 */
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_NMI_STS_GPP_D_0 */
		.data = 0x1,	/* Clear Status */
	},

/* Disable interrupts: */
	{	/* GPI_INT_EN_GPP_D_0 */
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_GPE_EN_GPP_D_0 */
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_SMI_EN_GPP_D_0 */
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_NMI_EN_GPP_D_0 */
		.data = 0x0,	/* Disable interrupt generation */
	},

/* Setup GPP_D_0 Pad Config: */
	{	/* PAD_CFG_DW0_GPP_D_0 */
	/*
	 * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default)
	 *
	 * 29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state
	 *       directly from RX buffer (default)
	 *
	 * 28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
	 *
	 * 26:25 RX Level/Edge Configuration (RXEVCFG):
	 *
	 * 23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
	 *
	 * 20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
	 *       = 0 # Routing does not cause peripheral IRQ...
	 *           # (we want an NMI not an IRQ)
	 *
	 * 19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
	 * 18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
	 * 17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
	 *
	 * 11:10 Pad Mode (PMODE1/0): = 0h # GPIO controls the Pad.
	 *  9    GPIO RX Disable (GPIORXDIS):
	 *       = 0 # Enable the input buffer (active low enable)
	 *
	 *  8    GPIO TX Disable (GPIOTXDIS):
	 *       = 1 # Disable the output buffer; i.e. Hi-Z
	 *
	 *  1    GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.
	 *  0    GPIO TX State (GPIOTXSTATE):
	 *       = 0 # (Leave at default)
	 */
	},

	{	/* PAD_CFG_DW1_GPP_D_0 */
		.data = 0,	/* Termination = none (default) */
	},
};

static void uv_init_hubless_pch_d0(void)
{
	int i, read;

	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
	if (read != 0) {
		pr_info("UV: Hubless NMI already configured\n");
		return;
	}

	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
		uv_init_hubless_pch_io(init_nmi[i].offset,
					init_nmi[i].mask,
					init_nmi[i].data);
	}
}

static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{
	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
	int status = *pstat;

	hub_nmi->nmi_value = status;
	atomic_inc(&hub_nmi->read_mmr_count);

	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
		return 0;

	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
	(void)*pstat;			/* Flush write */

	return 1;
}

static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}

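/*
 * The "first in" election below relies on atomic_add_unless(&v, 1, 1):
 * the increment only happens while the counter is not already 1, so
 * exactly one CPU per hub (and one CPU system-wide) wins the 0 -> 1
 * transition and becomes the owner.
 */
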
/*
 * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
 * return true.  If first CPU in on the system, set global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}

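/*
 * Only one CPU per hub can take nmi_lock and probe the hardware flag;
 * the others spin briefly and watch the hub's "in_nmi" word instead,
 * which keeps MMR/PCH accesses to a minimum during an NMI event.
 */
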
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;

	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);

			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;

			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {
			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/*
		 * Check if this BMC missed setting the MMR NMI flag (or)
		 * UV hubless system where only PCH owner can check flag
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);
	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}

/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}

/* Ping non-responding CPUs, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for CPUs that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}

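/*
 * uv_nmi_wait_cpus() returns the number of CPUs that have not yet entered
 * the NMI handler; zero means every online CPU has checked in.  When
 * CPU 0 is the only straggler, the per-loop delay is extended to give it
 * extra time to arrive.
 */
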
/* Loop waiting as CPUs enter NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPUs coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}

/* Wait until all slave CPUs have entered UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPUs to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			cpumask_weight(uv_nmi_cpu_mask),
			cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPUs are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			cpumask_weight(uv_nmi_cpu_mask),
			cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}

/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}

/*
 * Dump this CPU's state.  If action was set to "kdump" and the crash_kexec
 * failed, then we provide "dump" as an alternate action.  Action "dump" now
 * also includes the show "ips" (instruction pointers) action whereas the
 * action "ips" only displays instruction pointers for the non-idle CPUs.
 * This is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();

	if (current->pid != 0 || !uv_nmi_action_is("ips"))
		uv_nmi_dump_cpu_ip(cpu, regs);

	if (uv_nmi_action_is("dump")) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}

	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}

/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}

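/*
 * Exit handshake: the master waits for the in-NMI count to drain to
 * zero, then clears uv_nmi_slave_continue; slaves spin on that word,
 * so no CPU leaves until the master declares the event finished.
 */
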
/* Wait until all CPUs ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}

/* Current "health" check is to check which CPUs are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}

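/*
 * For the "dump" and "ips" actions the master serializes all output: it
 * temporarily switches the console loglevel around the walk and releases
 * each slave in turn through its per-CPU state word (UV_NMI_STATE_DUMP),
 * so the traces do not interleave.
 */
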
/* Walk through CPU list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

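/*
 * CPUs may have spun in the NMI handler far longer than the various
 * watchdog thresholds allow, so every watchdog (soft-lockup, clocksource,
 * RCU stall, hard-lockup NMI) is reset before normal execution resumes.
 */
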
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}

static atomic_t uv_nmi_kexec_failed;

#if defined(CONFIG_KEXEC_CORE)
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	/* Call crash to dump system state */
	if (master) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned, ");
		atomic_set(&uv_nmi_kexec_failed, 1);
		if (!kexec_crash_image) {
			pr_cont("crash kernel not loaded\n");
			return;
		}
		pr_cont("kexec busy, stalling cpus while waiting\n");
	}

	/* If crash_kexec fails, the slaves should return; otherwise stall */
	while (atomic_read(&uv_nmi_kexec_failed) == 0)
		mdelay(10);
}

#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
	atomic_set(&uv_nmi_kexec_failed, 1);
}
#endif /* !CONFIG_KEXEC_CORE */

#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action_is("kgdb"))
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */

#ifdef CONFIG_KGDB
/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used.  See the KGDB documentation for
 * further information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				&uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */

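/*
 * Flow of the main handler below: classify the NMI, note whether we are
 * the master (first CPU in), gather all CPUs, run the selected action,
 * then clear per-CPU, per-hub, and global state and refresh the
 * watchdogs before returning NMI_HANDLED.
 */
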
/*
 * UV NMI handler
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump")) {
		uv_nmi_kdump(cpu, master, regs);

		/* Unexpected return, revert action to "dump" */
		if (master)
			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
	}

	/* Pause as all CPUs enter the NMI handler */
	uv_nmi_wait(master);

	/* Process actions other than "kdump": */
	if (uv_nmi_action_is("health")) {
		uv_nmi_action_health(cpu, regs, master);
	} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
		uv_nmi_dump_state(cpu, regs, master);
	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
		uv_call_kgdb_kdb(cpu, regs, master);
	} else {
		if (master)
			pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
		uv_nmi_sync_exit(master);
	}

	/* Clear per_cpu "in_nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}

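/*
 * The ping handler below is registered on the NMI_LOCAL chain, so it runs
 * for every local NMI (including perf); the "pinging" check lets it bail
 * out almost immediately when the NMI is not ours.
 */
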
/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}

static void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}

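/*
 * LVT1 is the local APIC entry for the LINT1 pin, which is conventionally
 * wired for NMI delivery; programming APIC_DM_NMI and clearing the mask
 * bit ensures each CPU will actually take externally generated NMIs.
 */
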
void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all CPUs
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

/* Setup HUB NMI info */
void __init uv_nmi_setup_common(bool hubbed)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu;

	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
			uv_hub_nmi_list[nid]->hub_present = hubbed;
			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}

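/*
 * Note that all CPUs on a node share that node's uv_hub_nmi_s, so hub
 * ownership, locking, and NMI counts are tracked once per hub rather
 * than once per CPU.
 */
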
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}

/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		pch_base, PCH_PCR_GPIO_1_BASE);

	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();

	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();

	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: Hubless NMI enabled\n");
}