Index: sys/x86/xen/xen_intr.c
===================================================================
--- sys/x86/xen/xen_intr.c
+++ sys/x86/xen/xen_intr.c
@@ -74,6 +74,13 @@
 
 static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
 
+/*
+ * Lock for x86-related structures. Notably modifying
+ * xen_intr_auto_vector_count, and allocating interrupts require this lock be
+ * held.
+ */
+static struct mtx xen_intr_x86_lock;
+
 static u_int first_evtchn_irq;
 
 /**
@@ -158,6 +165,14 @@
 	.pic_assign_cpu = xen_intr_assign_cpu
 };
 
+/*
+ * Lock for interrupt core data. Notably modifying xen_intr_port_to_isrc[],
+ * or isrc->xi_port (which implies the former) requires this lock be held.
+ *
+ * Any time this lock is not held, the condition
+ * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
+ * MUST be true for all values of i which are valid indices of the array.
+ */
 static struct mtx xen_intr_isrc_lock;
 static u_int xen_intr_auto_vector_count;
 static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
@@ -272,7 +287,7 @@
 {
 	int isrc_idx;
 
-	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));
+	mtx_assert(&xen_intr_x86_lock, MA_OWNED);
 
 	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) {
 		struct xenisrc *isrc;
@@ -306,20 +321,27 @@
 	struct xenisrc *isrc;
 	unsigned int vector;
 
-	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));
+	mtx_lock(&xen_intr_x86_lock);
+	isrc = xen_intr_find_unused_isrc(type);
+
+	if (isrc != NULL) {
+		mtx_unlock(&xen_intr_x86_lock);
+		return (isrc);
+	}
 
 	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
 		if (!warned) {
 			warned = 1;
 			printf("%s: Xen interrupts exhausted.\n", __func__);
 		}
+		mtx_unlock(&xen_intr_x86_lock);
 		return (NULL);
 	}
 
 	vector = first_evtchn_irq + xen_intr_auto_vector_count;
 	xen_intr_auto_vector_count++;
 
-	mtx_unlock(&xen_intr_isrc_lock);
+	mtx_unlock(&xen_intr_x86_lock);
 	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
 	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
 	isrc->xi_arch.xai_vector = vector;
@@ -328,7 +350,6 @@
 		free(isrc, M_XENINTR);
 		return (NULL);
 	}
-	mtx_lock(&xen_intr_isrc_lock);
 
 	return (isrc);
 }
@@ -363,12 +384,21 @@
 		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
 	}
 
+	/* not reachable from xen_intr_port_to_isrc[], therefore unlock */
+	mtx_unlock(&xen_intr_isrc_lock);
 	isrc->xi_cpu = 0;
-	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
 	isrc->xi_port = ~0;
 	isrc->xi_cookie = NULL;
 
-	mtx_unlock(&xen_intr_isrc_lock);
+	/*
+	 * Fun with locking here. xen_intr_x86_lock is actually controlling
+	 * *allocation*. This means the isrc isn't under control of the lock
+	 * until ->xi_type == EVTCHN_TYPE_UNBOUND. The consequence is that
+	 * atomic_store_rel() is appropriate since we merely need the other
+	 * stores to complete before this one. This one simply needs to
+	 * complete atomically.
+	 */
+	atomic_store_rel_32(&isrc->xi_type, EVTCHN_TYPE_UNBOUND);
 
 	return (0);
 }
@@ -408,15 +438,10 @@
 		return (EINVAL);
 	}
 
+	isrc = xen_intr_alloc_isrc(type);
+	if (isrc == NULL)
+		return (ENOSPC);
 	mtx_lock(&xen_intr_isrc_lock);
-	isrc = xen_intr_find_unused_isrc(type);
-	if (isrc == NULL) {
-		isrc = xen_intr_alloc_isrc(type);
-		if (isrc == NULL) {
-			mtx_unlock(&xen_intr_isrc_lock);
-			return (ENOSPC);
-		}
-	}
 	isrc->xi_port = local_port;
 	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
 	refcount_init(&isrc->xi_refcount, 1);
@@ -608,6 +633,8 @@
 
 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
 
+	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
+
 	/*
 	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
 	 * event channels are bound to CPU#0.
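For reference, the memory-ordering argument in the "Fun with locking" comment can be illustrated outside the kernel. Below is a minimal userland sketch using C11 atomics; the names (struct slot, slot_release, slot_try_claim) are simplified stand-ins and not FreeBSD code. The releasing side clears the per-slot fields and then publishes the unbound state with a release store, so a claimer that observes the unbound state with an acquire load also observes the cleared fields; in the patch itself the claiming side is additionally serialized by xen_intr_x86_lock.

/*
 * Illustrative sketch only, not part of the patch: release/acquire pairing
 * for a slot that becomes reusable once its type flag reads as unbound.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

enum { TYPE_UNBOUND = 0, TYPE_PORT = 1 };

struct slot {
	uint32_t port;			/* plain field, read only after claiming */
	void *cookie;			/* likewise */
	_Atomic uint32_t type;		/* publication flag */
};

/* Release path: tear the slot down, then publish it as reusable. */
static void
slot_release(struct slot *s)
{
	s->port = ~0u;
	s->cookie = NULL;
	/* Release store: the field stores above become visible first. */
	atomic_store_explicit(&s->type, TYPE_UNBOUND, memory_order_release);
}

/* Allocation path: claim the slot only if it is currently unbound. */
static int
slot_try_claim(struct slot *s)
{
	/* Acquire load pairs with the release store in slot_release(). */
	if (atomic_load_explicit(&s->type, memory_order_acquire) != TYPE_UNBOUND)
		return (0);
	/* In the kernel this transition happens under xen_intr_x86_lock. */
	s->type = TYPE_PORT;
	return (1);
}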