Index: head/sys/x86/include/apicvar.h =================================================================== --- head/sys/x86/include/apicvar.h +++ head/sys/x86/include/apicvar.h @@ -111,11 +111,8 @@ #define IPI_INVLPG (APIC_IPI_INTS + 2) #define IPI_INVLRNG (APIC_IPI_INTS + 3) #define IPI_INVLCACHE (APIC_IPI_INTS + 4) -#ifdef __i386__ -#define IPI_LAZYPMAP (APIC_IPI_INTS + 5) /* Lazy pmap release. */ -#endif /* Vector to handle bitmap based IPIs */ -#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 6) +#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 5) /* IPIs handled by IPI_BITMAP_VECTOR */ #define IPI_AST 0 /* Generate software trap. */ @@ -124,8 +121,15 @@ #define IPI_BITMAP_LAST IPI_HARDCLOCK #define IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST) -#define IPI_STOP (APIC_IPI_INTS + 7) /* Stop CPU until restarted. */ -#define IPI_SUSPEND (APIC_IPI_INTS + 8) /* Suspend CPU until restarted. */ +#define IPI_STOP (APIC_IPI_INTS + 6) /* Stop CPU until restarted. */ +#define IPI_SUSPEND (APIC_IPI_INTS + 7) /* Suspend CPU until restarted. */ +#ifdef __i386__ +#define IPI_LAZYPMAP (APIC_IPI_INTS + 8) /* Lazy pmap release. */
+#define IPI_DYN_FIRST (APIC_IPI_INTS + 9) +#else +#define IPI_DYN_FIRST (APIC_IPI_INTS + 8) +#endif +#define IPI_DYN_LAST (254) /* IPIs allocated at runtime */ /* * IPI_STOP_HARD does not need to occupy a slot in the IPI vector space since @@ -224,6 +228,8 @@ void (*ipi_raw)(register_t, u_int); void (*ipi_vectored)(u_int, int); int (*ipi_wait)(int); + int (*ipi_alloc)(inthand_t *ipifunc); + void (*ipi_free)(int vector); /* LVT */ int (*set_lvt_mask)(u_int, u_int, u_char); @@ -397,6 +403,20 @@ } static inline int +lapic_ipi_alloc(inthand_t *ipifunc) +{ + + return (apic_ops.ipi_alloc(ipifunc)); +} + +static inline void +lapic_ipi_free(int vector) +{ + + return (apic_ops.ipi_free(vector)); +} + +static inline int lapic_set_lvt_mask(u_int apic_id, u_int lvt, u_char masked) { Index: head/sys/x86/x86/local_apic.c =================================================================== --- head/sys/x86/x86/local_apic.c +++ head/sys/x86/x86/local_apic.c @@ -303,6 +303,8 @@ enum intr_polarity pol); static int native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger); +static int native_lapic_ipi_alloc(inthand_t *ipifunc); +static void native_lapic_ipi_free(int vector); struct apic_ops apic_ops = { .create = native_lapic_create, @@ -329,6 +331,8 @@ .ipi_raw = native_lapic_ipi_raw, .ipi_vectored = native_lapic_ipi_vectored, .ipi_wait = native_lapic_ipi_wait, + .ipi_alloc = native_lapic_ipi_alloc, + .ipi_free = native_lapic_ipi_free, #endif .set_lvt_mask = native_lapic_set_lvt_mask, .set_lvt_mode = native_lapic_set_lvt_mode, @@ -1761,4 +1765,60 @@ } #endif /* DETECT_DEADLOCK */ } + +/* + * Since the IDT is shared by all CPUs the IPI slot update needs to be globally + * visible.
+ * + * Consider the case where an IPI is generated immediately after allocation: + * vector = lapic_ipi_alloc(ipifunc); + * ipi_selected(other_cpus, vector); + * + * In xAPIC mode a write to ICR_LO has serializing semantics because the + * APIC page is mapped as an uncached region. In x2APIC mode there is an + * explicit 'mfence' before the ICR MSR is written. Therefore in both cases + * the IDT slot update is globally visible before the IPI is delivered. + */ +static int +native_lapic_ipi_alloc(inthand_t *ipifunc) +{ + struct gate_descriptor *ip; + long func; + int idx, vector; + + KASSERT(ipifunc != &IDTVEC(rsvd), ("invalid ipifunc %p", ipifunc)); + + vector = -1; + mtx_lock_spin(&icu_lock); + for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) { + ip = &idt[idx]; + func = (ip->gd_hioffset << 16) | ip->gd_looffset; + if (func == (uintptr_t)&IDTVEC(rsvd)) { + vector = idx; + setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC); + break; + } + } + mtx_unlock_spin(&icu_lock); + return (vector); +} + +static void +native_lapic_ipi_free(int vector) +{ + struct gate_descriptor *ip; + long func; + + KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST, + ("%s: invalid vector %d", __func__, vector)); + + mtx_lock_spin(&icu_lock); + ip = &idt[vector]; + func = (ip->gd_hioffset << 16) | ip->gd_looffset; + KASSERT(func != (uintptr_t)&IDTVEC(rsvd), + ("invalid idtfunc %#lx", func)); + setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC); + mtx_unlock_spin(&icu_lock); +} + #endif /* SMP */ Index: head/sys/x86/xen/xen_apic.c =================================================================== --- head/sys/x86/xen/xen_apic.c +++ head/sys/x86/xen/xen_apic.c @@ -311,7 +311,22 @@ XEN_APIC_UNSUPPORTED; return (0); } -#endif + +static int +xen_pv_lapic_ipi_alloc(inthand_t *ipifunc) +{ + + XEN_APIC_UNSUPPORTED; + return (-1); +} + +static void +xen_pv_lapic_ipi_free(int vector) +{ + + XEN_APIC_UNSUPPORTED; +} +#endif /* SMP */ static int 
xen_pv_lapic_set_lvt_mask(u_int apic_id, u_int lvt, u_char masked) @@ -372,6 +387,8 @@ .ipi_raw = xen_pv_lapic_ipi_raw, .ipi_vectored = xen_pv_lapic_ipi_vectored, .ipi_wait = xen_pv_lapic_ipi_wait, + .ipi_alloc = xen_pv_lapic_ipi_alloc, + .ipi_free = xen_pv_lapic_ipi_free, #endif .set_lvt_mask = xen_pv_lapic_set_lvt_mask, .set_lvt_mode = xen_pv_lapic_set_lvt_mode,