diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -230,38 +230,22 @@
 	VM_CNT_INC(v_trap);
 	type = frame->tf_trapno;
 
-#ifdef SMP
-	/* Handler for NMI IPIs used for stopping CPUs. */
-	if (type == T_NMI && ipi_nmi_handler() == 0)
-		return;
-#endif
-
 #ifdef KDB
 	if (kdb_active) {
 		kdb_reenter();
 		return;
 	}
 #endif
 
+	if (type == T_NMI) {
+		nmi_handle_intr(frame);
+		return;
+	}
 	if (type == T_RESERVED) {
 		trap_fatal(frame, 0);
 		return;
 	}
 
-	if (type == T_NMI) {
-#ifdef HWPMC_HOOKS
-		/*
-		 * CPU PMCs interrupt using an NMI.  If the PMC module is
-		 * active, pass the 'rip' value to the PMC module's interrupt
-		 * handler.  A non-zero return value from the handler means that
-		 * the NMI was consumed by it and we can return immediately.
-		 */
-		if (pmc_intr != NULL &&
-		    (*pmc_intr)(frame) != 0)
-			return;
-#endif
-	}
-
 	if ((frame->tf_rflags & PSL_I) == 0) {
 		/*
 		 * Buggy application or kernel code has disabled
@@ -392,10 +376,6 @@
 		signo = SIGFPE;
 		break;
 
-	case T_NMI:
-		nmi_handle_intr(type, frame);
-		return;
-
 	case T_OFLOW:		/* integer overflow fault */
 		ucode = FPE_INTOVF;
 		signo = SIGFPE;
 		break;
@@ -607,10 +587,6 @@
 			return;
 #endif
 		break;
-
-	case T_NMI:
-		nmi_handle_intr(type, frame);
-		return;
 	}
 
 	trap_fatal(frame, 0);
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -237,12 +237,6 @@
 	KASSERT((read_eflags() & PSL_I) == 0,
 	    ("trap: interrupts enabled, type %d frame %p", type, frame));
 
-#ifdef SMP
-	/* Handler for NMI IPIs used for stopping CPUs. */
-	if (type == T_NMI && ipi_nmi_handler() == 0)
-		return;
-#endif /* SMP */
-
 #ifdef KDB
 	if (kdb_active) {
 		kdb_reenter();
@@ -251,24 +245,14 @@
 #endif
 	trap_check_kstack();
 
-	if (type == T_RESERVED) {
-		trap_fatal(frame, 0);
+	if (type == T_NMI) {
+		nmi_handle_intr(frame);
 		return;
 	}
 
-	if (type == T_NMI) {
-#ifdef HWPMC_HOOKS
-		/*
-		 * CPU PMCs interrupt using an NMI so we check for that first.
-		 * If the HWPMC module is active, 'pmc_hook' will point to
-		 * the function to be called.  A non-zero return value from the
-		 * hook means that the NMI was consumed by it and that we can
-		 * return immediately.
-		 */
-		if (pmc_intr != NULL &&
-		    (*pmc_intr)(frame) != 0)
-			return;
-#endif
+	if (type == T_RESERVED) {
+		trap_fatal(frame, 0);
+		return;
 	}
 
 	if (type == T_MCHK) {
diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h
--- a/sys/x86/include/x86_var.h
+++ b/sys/x86/include/x86_var.h
@@ -147,7 +147,9 @@
 void	zenbleed_check_and_apply(bool all_cpus);
 void	nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
 void	nmi_call_kdb_smp(u_int type, struct trapframe *frame);
-void	nmi_handle_intr(u_int type, struct trapframe *frame);
+void	nmi_register_handler(int (*handler)(struct trapframe *));
+void	nmi_remove_handler(int (*handler)(struct trapframe *));
+void	nmi_handle_intr(struct trapframe *frame);
 void	pagecopy(void *from, void *to);
 void	printcpuinfo(void);
 int	pti_get_default(void);
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -76,6 +76,7 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef SMP
 #include
@@ -87,6 +88,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -885,18 +887,88 @@
 	panic("NMI");
 }
 
+/*
+ * Dynamically registered NMI handlers.
+ */
+static struct nmi_handler {
+	int (*func)(struct trapframe *);
+	struct nmi_handler *next;
+} nmi_handlers_head;
+static uma_zone_t nmi_handler_zone;
+
+void
+nmi_register_handler(int (*handler)(struct trapframe *))
+{
+	struct nmi_handler *hp;
+
+	/* Reuse an empty slot if one exists. */
+	for (hp = &nmi_handlers_head; hp != NULL; hp = hp->next) {
+		if (atomic_load_ptr(&hp->func) == handler)
+			panic("%s: NMI handler already registered", __func__);
+		if (atomic_cmpset_ptr((volatile uintptr_t *)&hp->func,
+		    (uintptr_t)NULL, (uintptr_t)handler) != 0)
+			return;
+	}
+	/* Otherwise allocate a new entry and prepend it after the head. */
+	hp = uma_zalloc(nmi_handler_zone, M_WAITOK | M_ZERO);
+	hp->func = handler;
+	do {
+		hp->next = nmi_handlers_head.next;
+	} while (atomic_cmpset_ptr(
+	    (volatile uintptr_t *)&nmi_handlers_head.next,
+	    (uintptr_t)hp->next, (uintptr_t)hp) == 0);
+}
+
+void
+nmi_remove_handler(int (*handler)(struct trapframe *))
+{
+	struct nmi_handler *hp;
+
+	/* Clear the matching slot; list entries themselves are never freed. */
+	for (hp = &nmi_handlers_head; hp != NULL; hp = hp->next) {
+		if (atomic_cmpset_ptr((volatile uintptr_t *)&hp->func,
+		    (uintptr_t)handler, (uintptr_t)NULL) != 0)
+			return;
+	}
+
+	panic("%s: attempting to remove an unregistered NMI handler %p",
+	    __func__, handler);
+}
+
 void
-nmi_handle_intr(u_int type, struct trapframe *frame)
+nmi_handle_intr(struct trapframe *frame)
 {
+	int (*func)(struct trapframe *);
+	struct nmi_handler *hp;
+	bool handled;
 
+#ifdef SMP
+	/* Handler for NMI IPIs used for stopping CPUs. */
+	if (ipi_nmi_handler() == 0)
+		return;
+#endif
+	/* Give every registered handler a chance to claim this NMI. */
+	handled = false;
+	for (hp = &nmi_handlers_head; hp != NULL; hp = hp->next) {
+		func = atomic_load_ptr(&hp->func);
+		if (func != NULL && func(frame) != 0)
+			handled = true;
+	}
+	if (handled)
+		return;
+
 #ifdef SMP
 	if (nmi_is_broadcast) {
-		nmi_call_kdb_smp(type, frame);
+		nmi_call_kdb_smp(T_NMI, frame);
 		return;
 	}
 #endif
-	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
+	nmi_call_kdb(PCPU_GET(cpuid), T_NMI, frame);
+}
+
+static void
+cpu_init_nmi_handlers(void *arg __unused)
+{
+	nmi_handler_zone = uma_zcreate("Dynamically registered NMI handlers",
+	    sizeof(struct nmi_handler), NULL, NULL, NULL, NULL, 0,
+	    UMA_ZONE_NOFREE);
 }
+SYSINIT(cpu_init_nmi_handlers, SI_SUB_CPU, SI_ORDER_FIRST,
+    cpu_init_nmi_handlers, NULL);
 
 static int	hw_ibrs_active;
 int	hw_ibrs_ibpb_active;
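
Example usage (not part of the patch): a minimal sketch of how a kernel module could consume the new registration API. The module and symbol names (example_nmi, example_nmi_handler, example_nmi_modevent) are hypothetical. The only contract assumed is the one visible above: a handler returns nonzero when it has consumed the NMI, and nmi_register_handler() may sleep because it allocates with M_WAITOK, so it must be called from a sleepable context such as a module event handler. On amd64 and i386, <machine/md_var.h> pulls in x86/x86_var.h, which declares the new functions.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/frame.h>
#include <machine/md_var.h>

/*
 * Hypothetical handler: return nonzero if this NMI was ours, zero to let
 * the remaining handlers (and ultimately the debugger path) see it.
 */
static int
example_nmi_handler(struct trapframe *frame)
{
	return (0);
}

static int
example_nmi_modevent(module_t mod, int event, void *arg)
{
	switch (event) {
	case MOD_LOAD:
		nmi_register_handler(example_nmi_handler);
		return (0);
	case MOD_UNLOAD:
		nmi_remove_handler(example_nmi_handler);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_nmi_mod = {
	"example_nmi",
	example_nmi_modevent,
	NULL
};
DECLARE_MODULE(example_nmi, example_nmi_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);

Because the list entries come from a UMA_ZONE_NOFREE zone and nmi_remove_handler() only clears the function pointer, repeated load/unload cycles reuse existing slots instead of freeing memory that nmi_handle_intr() might still be traversing from another CPU.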