Diffstat (limited to 'arch/x86/kernel/nmi.c')
-rw-r--r--  arch/x86/kernel/nmi.c | 87
1 file changed, 43 insertions, 44 deletions
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 9a95d00f1423..be93ec7255bf 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -49,27 +49,20 @@ struct nmi_desc {
         struct list_head head;
 };
 
-static struct nmi_desc nmi_desc[NMI_MAX] =
-{
-        {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
-                .head = LIST_HEAD_INIT(nmi_desc[0].head),
-        },
-        {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
-                .head = LIST_HEAD_INIT(nmi_desc[1].head),
-        },
-        {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
-                .head = LIST_HEAD_INIT(nmi_desc[2].head),
-        },
-        {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
-                .head = LIST_HEAD_INIT(nmi_desc[3].head),
-        },
+#define NMI_DESC_INIT(type) {                                           \
+        .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[type].lock),        \
+        .head = LIST_HEAD_INIT(nmi_desc[type].head),                    \
+}
+static struct nmi_desc nmi_desc[NMI_MAX] = {
+        NMI_DESC_INIT(NMI_LOCAL),
+        NMI_DESC_INIT(NMI_UNKNOWN),
+        NMI_DESC_INIT(NMI_SERR),
+        NMI_DESC_INIT(NMI_IO_CHECK),
 };
 
+#define nmi_to_desc(type) (&nmi_desc[type])
+
 struct nmi_stats {
         unsigned int normal;
         unsigned int unknown;
@@ -91,6 +84,9 @@ static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
 static int ignore_nmis __read_mostly;
 
 int unknown_nmi_panic;
+int panic_on_unrecovered_nmi;
+int panic_on_io_nmi;
+
 /*
  * Prevent NMI reason port (0x61) being accessed simultaneously, can
  * only be used in NMI handler.
@@ -104,8 +100,6 @@ static int __init setup_unknown_nmi_panic(char *str)
 }
 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
-#define nmi_to_desc(type) (&nmi_desc[type])
-
 static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
 
 static int __init nmi_warning_debugfs(void)
@@ -125,12 +119,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
 
         action->max_duration = duration;
 
-        remainder_ns = do_div(duration, (1000 * 1000));
-        decimal_msecs = remainder_ns / 1000;
+        /* Convert duration from nsec to msec */
+        remainder_ns = do_div(duration, NSEC_PER_MSEC);
+        decimal_msecs = remainder_ns / NSEC_PER_USEC;
 
-        printk_ratelimited(KERN_INFO
-                "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
-                action->handler, duration, decimal_msecs);
+        pr_info_ratelimited("INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+                            action->handler, duration, decimal_msecs);
 }
 
 static int nmi_handle(unsigned int type, struct pt_regs *regs)
@@ -333,10 +327,9 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
         int handled;
 
         /*
-         * Use 'false' as back-to-back NMIs are dealt with one level up.
-         * Of course this makes having multiple 'unknown' handlers useless
-         * as only the first one is ever run (unless it can actually determine
-         * if it caused the NMI)
+         * As a last resort, let the "unknown" handlers make a
+         * best-effort attempt to figure out if they can claim
+         * responsibility for this Unknown NMI.
          */
         handled = nmi_handle(NMI_UNKNOWN, regs);
         if (handled) {
@@ -366,17 +359,18 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
         bool b2b = false;
 
         /*
-         * CPU-specific NMI must be processed before non-CPU-specific
-         * NMI, otherwise we may lose it, because the CPU-specific
-         * NMI can not be detected/processed on other CPUs.
-         */
-
-        /*
-         * Back-to-back NMIs are interesting because they can either
-         * be two NMI or more than two NMIs (any thing over two is dropped
-         * due to NMI being edge-triggered).  If this is the second half
-         * of the back-to-back NMI, assume we dropped things and process
-         * more handlers.  Otherwise reset the 'swallow' NMI behaviour
+         * Back-to-back NMIs are detected by comparing the RIP of the
+         * current NMI with that of the previous NMI. If it is the same,
+         * it is assumed that the CPU did not have a chance to jump back
+         * into a non-NMI context and execute code in between the two
+         * NMIs.
+         *
+         * They are interesting because even if there are more than two,
+         * only a maximum of two can be detected (anything over two is
+         * dropped due to NMI being edge-triggered). If this is the
+         * second half of the back-to-back NMI, assume we dropped things
+         * and process more handlers. Otherwise, reset the 'swallow' NMI
+         * behavior.
          */
         if (regs->ip == __this_cpu_read(last_nmi_rip))
                 b2b = true;
@@ -390,6 +384,11 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
         if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
                 goto out;
 
+        /*
+         * CPU-specific NMI must be processed before non-CPU-specific
+         * NMI, otherwise we may lose it, because the CPU-specific
+         * NMI can not be detected/processed on other CPUs.
+         */
         handled = nmi_handle(NMI_LOCAL, regs);
         __this_cpu_add(nmi_stats.normal, handled);
         if (handled) {
@@ -426,13 +425,14 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
                         pci_serr_error(reason, regs);
                 else if (reason & NMI_REASON_IOCHK)
                         io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
+
                 /*
                  * Reassert NMI in case it became active
                  * meanwhile as it's edge-triggered:
                  */
-                reassert_nmi();
-#endif
+                if (IS_ENABLED(CONFIG_X86_32))
+                        reassert_nmi();
+
                 __this_cpu_add(nmi_stats.external, 1);
                 raw_spin_unlock(&nmi_reason_lock);
                 goto out;
@@ -751,4 +751,3 @@ void local_touch_nmi(void)
 {
         __this_cpu_write(last_nmi_rip, 0);
 }
-EXPORT_SYMBOL_GPL(local_touch_nmi);
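
The main cleanup in the first hunk replaces four copy-pasted nmi_desc[] initializer blocks with a single NMI_DESC_INIT() macro keyed by the NMI type. The standalone sketch below shows the same pattern in plain userspace C, with made-up demo_* names standing in for the kernel's raw_spinlock_t/list_head types: a macro that expands to a designated initializer which refers back to the array element it initializes, exactly the self-reference LIST_HEAD_INIT() and __RAW_SPIN_LOCK_UNLOCKED() need.

/*
 * Illustrative sketch only (not kernel code): an enum-indexed array whose
 * elements need initializers referring back to themselves, generated by one
 * macro instead of four hand-written blocks.
 */
#include <stdio.h>

enum demo_type { DEMO_LOCAL, DEMO_UNKNOWN, DEMO_SERR, DEMO_IO_CHECK, DEMO_MAX };

struct demo_list { struct demo_list *next, *prev; };

struct demo_desc {
        const char *name;
        struct demo_list head;          /* circular list head, points at itself */
};

/* Self-referential initializer, analogous to LIST_HEAD_INIT(nmi_desc[type].head) */
#define DEMO_LIST_INIT(l)       { &(l), &(l) }

/* One parameterized initializer instead of repeated per-index blocks */
#define DEMO_DESC_INIT(type, n) {                                       \
        .name = (n),                                                    \
        .head = DEMO_LIST_INIT(demo_desc[type].head),                   \
}

static struct demo_desc demo_desc[DEMO_MAX] = {
        DEMO_DESC_INIT(DEMO_LOCAL,    "local"),
        DEMO_DESC_INIT(DEMO_UNKNOWN,  "unknown"),
        DEMO_DESC_INIT(DEMO_SERR,     "serr"),
        DEMO_DESC_INIT(DEMO_IO_CHECK, "io_check"),
};

int main(void)
{
        /* Each element's list head should point back at that element */
        for (int i = 0; i < DEMO_MAX; i++)
                printf("%-8s self-referential: %s\n", demo_desc[i].name,
                       demo_desc[i].head.next == &demo_desc[i].head ? "yes" : "no");
        return 0;
}

Because the identifier demo_desc is already in scope inside its own initializer and the array has static storage, &demo_desc[type].head is a valid address constant, which is why this pattern works at file scope.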
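
The nmi_check_duration() hunk only swaps the magic numbers for NSEC_PER_MSEC and NSEC_PER_USEC; the arithmetic is unchanged. A minimal sketch of that nanosecond-to-millisecond split, using plain division in place of the kernel's do_div() helper (which stores the quotient in its argument and returns the remainder):

/* Sketch: format a u64 nanosecond duration as "<msecs>.<3-digit fraction> msecs" */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC   1000000ULL
#define NSEC_PER_USEC   1000ULL

int main(void)
{
        uint64_t duration = 2345678;                            /* 2.345678 ms, in ns */
        unsigned int remainder_ns = duration % NSEC_PER_MSEC;   /* do_div() return value */
        unsigned int decimal_msecs = remainder_ns / NSEC_PER_USEC;

        duration /= NSEC_PER_MSEC;                              /* do_div() quotient */
        printf("NMI handler took too long to run: %llu.%03u msecs\n",
               (unsigned long long)duration, decimal_msecs);    /* prints 2.345 msecs */
        return 0;
}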