Index: sys/x86/include/xen/arch-intr.h
===================================================================
--- sys/x86/include/xen/arch-intr.h
+++ sys/x86/include/xen/arch-intr.h
@@ -40,6 +40,4 @@
 
 typedef struct xen_arch_isrc xen_arch_isrc_t;
 
-extern struct pic xen_intr_pic;
-
 #endif /* _MACHINE_X86_XEN_ARCH_INTR_H_ */
Index: sys/x86/xen/xen_arch_intr.c
===================================================================
--- sys/x86/xen/xen_arch_intr.c
+++ sys/x86/xen/xen_arch_intr.c
@@ -48,6 +48,28 @@
 #include 
 #include 
 
+/*
+ * Lock for x86-related structures.  Notably modifying
+ * xen_intr_auto_vector_count, and allocating interrupts require this lock be
+ * held.
+ */
+static struct mtx xen_intr_x86_lock;
+
+static u_int first_evtchn_irq;
+
+static u_int xen_intr_auto_vector_count;
+
+
+void
+xen_intr_alloc_irqs(void)
+{
+
+	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
+		panic("IRQ allocation overflow (num_msi_irqs too high?)");
+	first_evtchn_irq = num_io_irqs;
+	num_io_irqs += NR_EVENT_CHANNELS;
+}
+
 /********************************* EVTCHN PIC ********************************/
 
 static void
@@ -175,7 +197,7 @@
 /**
  * PIC interface for all event channel port types except physical IRQs.
  */
-struct pic xen_intr_pic = {
+static struct pic xen_intr_pic = {
 	.pic_enable_source  = xen_intr_pic_enable_source,
 	.pic_disable_source = xen_intr_pic_disable_source,
 	.pic_eoi_source     = xen_intr_pic_eoi_source,
@@ -195,9 +217,123 @@
 xen_arch_intr_init(void)
 {
 
+	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
+
 	intr_register_pic(&xen_intr_pic);
 }
 
+/**
+ * Search for an already allocated but currently unused Xen interrupt
+ * source object.
+ *
+ * \param type  Restrict the search to interrupt sources of the given
+ *              type.
+ *
+ * \return  A pointer to a free Xen interrupt source object or NULL.
+ */
+static struct xenisrc *
+xen_intr_find_unused_isrc(enum evtchn_type type)
+{
+	u_int isrc_idx;
+
+	mtx_assert(&xen_intr_x86_lock, MA_OWNED);
+
+	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) {
+		struct xenisrc *isrc;
+		u_int vector;
+
+		vector = first_evtchn_irq + isrc_idx;
+		isrc = (struct xenisrc *)intr_lookup_source(vector);
+		if (isrc != NULL
+		    && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
+			KASSERT(!xen_arch_intr_has_handlers(isrc),
+			    ("Free evtchn still has handlers"));
+			isrc->xi_type = type;
+			return (isrc);
+		}
+	}
+	return (NULL);
+}
+
+/**
+ * Allocate a Xen interrupt source object.
+ *
+ * \param type  The type of interrupt source to create.
+ *
+ * \return  A pointer to a newly allocated Xen interrupt source
+ *          object or NULL.
+ */
+struct xenisrc *
+xen_arch_intr_alloc(struct malloc_type *mtype, const char *name __unused,
+    enum evtchn_type type, evtchn_port_t port __unused)
+{
+	static int warned;
+	struct xenisrc *isrc;
+	unsigned int vector;
+
+	mtx_lock(&xen_intr_x86_lock);
+	isrc = xen_intr_find_unused_isrc(type);
+
+	if (isrc != NULL) {
+		mtx_unlock(&xen_intr_x86_lock);
+		goto out;
+	}
+
+	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
+		if (!warned) {
+			warned = 1;
+			printf("%s: Xen interrupts exhausted.\n", __func__);
+		}
+		mtx_unlock(&xen_intr_x86_lock);
+		return (NULL);
+	}
+
+	vector = first_evtchn_irq + xen_intr_auto_vector_count;
+	xen_intr_auto_vector_count++;
+
+	KASSERT((intr_lookup_source(vector) == NULL),
+	    ("Trying to use an already allocated vector"));
+
+	mtx_unlock(&xen_intr_x86_lock);
+	isrc = malloc(sizeof(*isrc), mtype, M_WAITOK | M_ZERO);
+	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
+	isrc->xi_arch.xai_vector = vector;
+	isrc->xi_type = type;
+	if (intr_register_source(&isrc->xi_arch.xai_intsrc) != 0) {
+		free(isrc, mtype);
+		return (NULL);
+	}
+
+out:
+#ifdef SMP
+	if (type == EVTCHN_TYPE_PORT) {
+		/*
+		 * By default all interrupts are assigned to vCPU#0
+		 * unless specified otherwise, so shuffle them to balance
+		 * the interrupt load.
+		 */
+		isrc->xi_cpu = intr_next_cpu(0);
+	}
+#endif
+
+	return (isrc);
+}
+
+void
+xen_arch_intr_release(struct malloc_type *mtype __unused, struct xenisrc *isrc)
+{
+	isrc->xi_cpu = 0;
+	isrc->xi_port = ~0U;
+	isrc->xi_cookie = NULL;
+	/*
+	 * Only when ->xi_type == EVTCHN_TYPE_UNBOUND is the isrc under control
+	 * of xen_intr_x86_lock.
+	 */
+	mtx_lock(&xen_intr_x86_lock);
+	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
+	mtx_unlock(&xen_intr_x86_lock);
+}
+
 bool
 xen_arch_intr_has_handlers(struct xenisrc *isrc)
 {
Index: sys/xen/arch-intr.h
===================================================================
--- sys/xen/arch-intr.h
+++ sys/xen/arch-intr.h
@@ -66,6 +66,9 @@
 
 /******************* Functions implemented by each architecture **************/
 void	xen_arch_intr_init(void);
+struct xenisrc *xen_arch_intr_alloc(struct malloc_type *mtype, const char *name,
+	    enum evtchn_type type, evtchn_port_t port);
+void	xen_arch_intr_release(struct malloc_type *mtype, struct xenisrc *isrc);
 bool	xen_arch_intr_has_handlers(struct xenisrc *isrc);
 bool	xen_arch_intr_execute_handlers(struct xenisrc *isrc,
 	    struct trapframe *frame);
Index: sys/xen/xen_intr.c
===================================================================
--- sys/xen/xen_intr.c
+++ sys/xen/xen_intr.c
@@ -50,7 +50,6 @@
 #include 
 #include 
 
-#include 
 #include 
 #include 
 
@@ -72,15 +71,6 @@
 
 static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
 
-/*
- * Lock for x86-related structures.  Notably modifying
- * xen_intr_auto_vector_count, and allocating interrupts require this lock be
- * held.
- */
-static struct mtx xen_intr_x86_lock;
-
-static u_int first_evtchn_irq;
-
 /**
  * Per-cpu event channel processing state.
  */
@@ -130,7 +120,6 @@
  * Acquire/release operations for isrc->xi_refcount require this lock be held.
  */
 static struct mtx xen_intr_isrc_lock;
-static u_int xen_intr_auto_vector_count;
 static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
 
 /*------------------------- Private Functions --------------------------------*/
@@ -229,102 +218,6 @@
 	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
 }
 
-/**
- * Search for an already allocated but currently unused Xen interrupt
- * source object.
- *
- * \param type  Restrict the search to interrupt sources of the given
- *              type.
- *
- * \return  A pointer to a free Xen interrupt source object or NULL.
- */
-static struct xenisrc *
-xen_intr_find_unused_isrc(enum evtchn_type type)
-{
-	u_int isrc_idx;
-
-	mtx_assert(&xen_intr_x86_lock, MA_OWNED);
-
-	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) {
-		struct xenisrc *isrc;
-		u_int vector;
-
-		vector = first_evtchn_irq + isrc_idx;
-		isrc = (struct xenisrc *)intr_lookup_source(vector);
-		if (isrc != NULL
-		    && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
-			KASSERT(!xen_arch_intr_has_handlers(isrc),
-			    ("Free evtchn still has handlers"));
-			isrc->xi_type = type;
-			return (isrc);
-		}
-	}
-	return (NULL);
-}
-
-/**
- * Allocate a Xen interrupt source object.
- *
- * \param type  The type of interrupt source to create.
- *
- * \return  A pointer to a newly allocated Xen interrupt source
- *          object or NULL.
- */
-static struct xenisrc *
-xen_intr_alloc_isrc(enum evtchn_type type)
-{
-	static int warned;
-	struct xenisrc *isrc;
-	unsigned int vector;
-
-	mtx_lock(&xen_intr_x86_lock);
-	isrc = xen_intr_find_unused_isrc(type);
-
-	if (isrc != NULL) {
-		mtx_unlock(&xen_intr_x86_lock);
-		goto out;
-	}
-
-	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
-		if (!warned) {
-			warned = 1;
-			printf("%s: Xen interrupts exhausted.\n", __func__);
-		}
-		mtx_unlock(&xen_intr_x86_lock);
-		return (NULL);
-	}
-
-	vector = first_evtchn_irq + xen_intr_auto_vector_count;
-	xen_intr_auto_vector_count++;
-
-	KASSERT((intr_lookup_source(vector) == NULL),
-	    ("Trying to use an already allocated vector"));
-
-	mtx_unlock(&xen_intr_x86_lock);
-	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
-	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
-	isrc->xi_arch.xai_vector = vector;
-	isrc->xi_type = type;
-	if (intr_register_source(&isrc->xi_arch.xai_intsrc) != 0) {
-		free(isrc, M_XENINTR);
-		return (NULL);
-	}
-
-out:
-#ifdef SMP
-	if (type == EVTCHN_TYPE_PORT) {
-		/*
-		 * By default all interrupts are assigned to vCPU#0
-		 * unless specified otherwise, so shuffle them to balance
-		 * the interrupt load.
-		 */
-		isrc->xi_cpu = intr_next_cpu(0);
-	}
-#endif
-
-	return (isrc);
-}
-
 /**
  * Attempt to free an active Xen interrupt source object.
  *
@@ -360,16 +253,8 @@
 	/* not reachable from xen_intr_port_to_isrc[], therefore unlock */
 	mtx_unlock(&xen_intr_isrc_lock);
 
-	isrc->xi_cpu = 0;
-	isrc->xi_port = ~0U;
-	isrc->xi_cookie = NULL;
-	/*
-	 * Only when ->xi_type == EVTCHN_TYPE_UNBOUND is the isrc under control
-	 * of xen_intr_x86_lock.
-	 */
-	mtx_lock(&xen_intr_x86_lock);
-	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
-	mtx_unlock(&xen_intr_x86_lock);
+	xen_arch_intr_release(M_XENINTR, isrc);
+
 	return (0);
 }
 
@@ -409,7 +294,7 @@
 		return (EINVAL);
 	}
 
-	isrc = xen_intr_alloc_isrc(type);
+	isrc = xen_arch_intr_alloc(M_XENINTR, intr_owner, type, local_port);
 	if (isrc == NULL)
 		return (ENOSPC);
 	mtx_lock(&xen_intr_isrc_lock);
@@ -606,8 +491,6 @@
 
 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
 
-	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
-
 	/*
	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
	 * event channels are bound to CPU#0.
@@ -647,16 +530,6 @@
 }
 SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);
 
-void
-xen_intr_alloc_irqs(void)
-{
-
-	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
-		panic("IRQ allocation overflow (num_msi_irqs too high?)");
-	first_evtchn_irq = num_io_irqs;
-	num_io_irqs += NR_EVENT_CHANNELS;
-}
-
 /*--------------------------- Common PIC Functions ---------------------------*/
 /**
  * Prepare this PIC for system suspension.
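
For reference, a minimal sketch (not part of the patch) of how a consumer is expected to use the relocated hooks, assuming the prototypes declared in sys/xen/arch-intr.h above; the real caller is the xen_intr_bind_isrc()/xen_intr_release_isrc() path in sys/xen/xen_intr.c. The example_bind() function and the M_EXAMPLE malloc type are hypothetical and exist only for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <xen/xen_intr.h>
#include <xen/arch-intr.h>

static MALLOC_DEFINE(M_EXAMPLE, "xen_example", "illustrative isrc owner");

static int
example_bind(evtchn_port_t port)
{
	struct xenisrc *isrc;

	/*
	 * The arch layer either recycles an EVTCHN_TYPE_UNBOUND source or
	 * allocates and registers a fresh one; NULL means the event channel
	 * vector space is exhausted.
	 */
	isrc = xen_arch_intr_alloc(M_EXAMPLE, "example", EVTCHN_TYPE_PORT, port);
	if (isrc == NULL)
		return (ENOSPC);

	/* ... bind the port and install a handler, as xen_intr_bind_isrc() does ... */

	/*
	 * Hand the source back: the arch code resets it and marks it
	 * EVTCHN_TYPE_UNBOUND so xen_intr_find_unused_isrc() can reuse it.
	 */
	xen_arch_intr_release(M_EXAMPLE, isrc);
	return (0);
}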