Index: sys/x86/xen/xen_intr.c
===================================================================
--- sys/x86/xen/xen_intr.c
+++ sys/x86/xen/xen_intr.c
@@ -113,9 +113,8 @@
 
 DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
 
-#define XEN_INVALID_EVTCHN 0 /* Invalid event channel */
-
-#define is_valid_evtchn(x) ((x) != XEN_INVALID_EVTCHN)
+#define INVALID_EVTCHN (~(evtchn_port_t)0) /* Invalid event channel */
+#define is_valid_evtchn(x) ((x) < NR_EVENT_CHANNELS)
 
 struct xenisrc {
 	struct intsrc xi_intsrc;
@@ -310,10 +309,10 @@
 
 	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));
 
-	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
+	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
 		if (!warned) {
 			warned = 1;
-			printf("%s: Event channels exhausted.\n", __func__);
+			printf("%s: Xen interrupts exhausted.\n", __func__);
 		}
 		return (NULL);
 	}
@@ -346,26 +345,29 @@
 xen_intr_release_isrc(struct xenisrc *isrc)
 {
 
-	mtx_lock(&xen_intr_isrc_lock);
 	KASSERT(isrc->xi_intsrc.is_handlers == 0,
 	    ("Release called, but xenisrc still in use"));
-	evtchn_mask_port(isrc->xi_port);
-	evtchn_clear_port(isrc->xi_port);
+	mtx_lock(&xen_intr_isrc_lock);
+	if (is_valid_evtchn(isrc->xi_port)) {
+		evtchn_mask_port(isrc->xi_port);
+		evtchn_clear_port(isrc->xi_port);
 
-	/* Rebind port to CPU 0. */
-	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
-	evtchn_cpu_unmask_port(0, isrc->xi_port);
+		/* Rebind port to CPU 0. */
+		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+		evtchn_cpu_unmask_port(0, isrc->xi_port);
 
-	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
-		struct evtchn_close close = { .port = isrc->xi_port };
-		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-			panic("EVTCHNOP_close failed");
-	}
+		if (isrc->xi_close != 0) {
+			struct evtchn_close close = { .port = isrc->xi_port };
+
+			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+				panic("EVTCHNOP_close failed");
+		}
 
-	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
+		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
+	}
 	isrc->xi_cpu = 0;
 	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
-	isrc->xi_port = 0;
+	isrc->xi_port = INVALID_EVTCHN;
 	isrc->xi_cookie = NULL;
 	mtx_unlock(&xen_intr_isrc_lock);
 	return (0);
@@ -605,6 +607,12 @@
 	if (!xen_domain())
 		return (0);
 
+	CTASSERT(is_valid_evtchn(0));
+	CTASSERT(is_valid_evtchn(NR_EVENT_CHANNELS - 1));
+	CTASSERT(!is_valid_evtchn(NR_EVENT_CHANNELS));
+	CTASSERT(!is_valid_evtchn(~(evtchn_port_t)0));
+	CTASSERT(!is_valid_evtchn(INVALID_EVTCHN));
+
 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
 
 	/*
@@ -761,7 +769,7 @@
 		vector = first_evtchn_irq + isrc_idx;
 		isrc = (struct xenisrc *)intr_lookup_source(vector);
 		if (isrc != NULL) {
-			isrc->xi_port = 0;
+			isrc->xi_port = INVALID_EVTCHN;
 			switch (isrc->xi_type) {
 			case EVTCHN_TYPE_IPI:
 				xen_rebind_ipi(isrc);
@@ -1284,7 +1292,7 @@
 xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
 {
 
-	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
+	if (!is_valid_evtchn(port))
 		return (EINVAL);
 
 	if (handlep == NULL) {