Index: sys/xen/xen_intr.c
===================================================================
--- sys/xen/xen_intr.c
+++ sys/xen/xen_intr.c
@@ -72,6 +72,14 @@
 
 static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
 
+/*
+ * Lock for x86-related structures.  Notably, modifying
+ * xen_intr_auto_vector_count and allocating/releasing interrupts require
+ * this lock to be held.  This means modifying isrc->xi_type also requires
+ * this lock to be held.
+ */
+static struct mtx xen_intr_x86_lock;
+
 static u_int first_evtchn_irq;
 
 /**
@@ -165,6 +173,10 @@
 	.pic_assign_cpu = xen_intr_assign_cpu
 };
 
+/*
+ * Lock for interrupt core data.  Notably, modifying xen_intr_port_to_isrc[]
+ * or isrc->xi_port (which implies the former) requires this lock be held.
+ */
 static struct mtx xen_intr_isrc_lock;
 static u_int xen_intr_auto_vector_count;
 static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
@@ -279,7 +291,7 @@
 {
 	int isrc_idx;
 
-	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));
+	mtx_assert(&xen_intr_x86_lock, MA_OWNED);
 
 	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) {
 		struct xenisrc *isrc;
@@ -313,13 +325,20 @@
 	struct xenisrc *isrc;
 	unsigned int vector;
 
-	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));
+	mtx_lock(&xen_intr_x86_lock);
+	isrc = xen_intr_find_unused_isrc(type);
+
+	if (isrc != NULL) {
+		mtx_unlock(&xen_intr_x86_lock);
+		return (isrc);
+	}
 
 	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
 		if (!warned) {
 			warned = 1;
 			printf("%s: Event channels exhausted.\n", __func__);
 		}
+		mtx_unlock(&xen_intr_x86_lock);
 		return (NULL);
 	}
 
@@ -329,13 +348,12 @@
 	KASSERT((intr_lookup_source(vector) == NULL),
 	    ("Trying to use an already allocated vector"));
 
-	mtx_unlock(&xen_intr_isrc_lock);
+	mtx_unlock(&xen_intr_x86_lock);
 	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
 	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
 	isrc->xi_arch.xai_vector = vector;
 	isrc->xi_type = type;
 	intr_register_source(&isrc->xi_arch.xai_intsrc);
-	mtx_lock(&xen_intr_isrc_lock);
 
 	return (isrc);
 }
@@ -368,11 +386,15 @@
 	}
 
 	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
+	/* isrc is no longer reachable via xen_intr_port_to_isrc[], so unlock */
+	mtx_unlock(&xen_intr_isrc_lock);
+
 	isrc->xi_cpu = 0;
-	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
 	isrc->xi_port = 0;
 	isrc->xi_cookie = NULL;
-	mtx_unlock(&xen_intr_isrc_lock);
+	mtx_lock(&xen_intr_x86_lock);
+	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
+	mtx_unlock(&xen_intr_x86_lock);
 	return (0);
 }
 
@@ -413,15 +435,10 @@
 		return (EINVAL);
 	}
 
+	isrc = xen_intr_alloc_isrc(type);
+	if (isrc == NULL)
+		return (ENOSPC);
 	mtx_lock(&xen_intr_isrc_lock);
-	isrc = xen_intr_find_unused_isrc(type);
-	if (isrc == NULL) {
-		isrc = xen_intr_alloc_isrc(type);
-		if (isrc == NULL) {
-			mtx_unlock(&xen_intr_isrc_lock);
-			return (ENOSPC);
-		}
-	}
 	isrc->xi_port = local_port;
 	xen_intr_port_to_isrc[local_port] = isrc;
 	refcount_init(&isrc->xi_refcount, 1);
@@ -619,6 +636,8 @@
 
 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
 
+	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
+
 	/*
 	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
 	 * event channels are bound to CPU#0.
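
For readers following the locking change, below is a minimal userspace sketch of the discipline this patch establishes, with pthread mutexes standing in for the kernel's mtx(9). All names in it (alloc_lock, port_lock, struct source, source_alloc, source_bind, source_release) are hypothetical stand-ins, not symbols from xen_intr.c: one lock covers allocation state and the type field, the other covers the port lookup table and the port field, and the two are never held at the same time.

/*
 * Hypothetical userspace sketch of the two-lock discipline; pthread
 * mutexes stand in for the kernel's mtx(9).  Not FreeBSD code.
 */
#include <pthread.h>
#include <stdlib.h>

#define NPORTS	128

struct source {
	int type;	/* guarded by alloc_lock (cf. xi_type) */
	int port;	/* guarded by port_lock (cf. xi_port) */
};

/* cf. xen_intr_x86_lock: guards allocation state and ->type. */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
/* cf. xen_intr_isrc_lock: guards port_to_source[] and ->port. */
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static struct source *port_to_source[NPORTS];

/* Allocate under alloc_lock only; no port state is touched here. */
static struct source *
source_alloc(int type)
{
	struct source *src;

	src = calloc(1, sizeof(*src));
	if (src == NULL)
		return (NULL);
	pthread_mutex_lock(&alloc_lock);
	src->type = type;
	pthread_mutex_unlock(&alloc_lock);
	return (src);
}

/* Publish under port_lock only, mirroring the reordered bind path. */
static void
source_bind(struct source *src, int port)
{
	pthread_mutex_lock(&port_lock);
	src->port = port;
	port_to_source[port] = src;
	pthread_mutex_unlock(&port_lock);
}

/*
 * Unpublish under port_lock; once the source is unreachable, drop that
 * lock and take alloc_lock only to reset ->type, as in the patched
 * release path.  The two locks are never held simultaneously.
 */
static void
source_release(struct source *src)
{
	pthread_mutex_lock(&port_lock);
	port_to_source[src->port] = NULL;
	src->port = 0;
	pthread_mutex_unlock(&port_lock);

	pthread_mutex_lock(&alloc_lock);
	src->type = 0;	/* cf. EVTCHN_TYPE_UNBOUND */
	pthread_mutex_unlock(&alloc_lock);
	free(src);
}

int
main(void)
{
	struct source *src = source_alloc(1);

	if (src != NULL) {
		source_bind(src, 7);
		source_release(src);
	}
	return (0);
}

One payoff of the split is visible in the reworked bind path: xen_intr_alloc_isrc() is hoisted out of the xen_intr_isrc_lock critical section entirely, so the M_WAITOK malloc() now runs with neither lock held and the old drop-and-reacquire dance around it disappears.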