Index: sys/x86/xen/xen_intr.c
===================================================================
--- sys/x86/xen/xen_intr.c
+++ sys/x86/xen/xen_intr.c
@@ -120,10 +120,6 @@
 
 DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
 
-#define XEN_INVALID_EVTCHN 0 /* Invalid event channel */
-
-#define is_valid_evtchn(x) ((x) != XEN_INVALID_EVTCHN)
-
 struct xenisrc {
 	struct intsrc xi_intsrc;
 	enum evtchn_type xi_type;
@@ -333,10 +329,10 @@
 		return (isrc);
 	}
 
-	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
+	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
 		if (!warned) {
 			warned = 1;
-			printf("%s: Event channels exhausted.\n", __func__);
+			printf("%s: Xen interrupts exhausted.\n", __func__);
 		}
 		mtx_unlock(&xen_intr_x86_lock);
 		return (NULL);
@@ -372,28 +368,30 @@
 xen_intr_release_isrc(struct xenisrc *isrc)
 {
 
-	mtx_lock(&xen_intr_isrc_lock);
 	KASSERT(isrc->xi_intsrc.is_handlers == 0,
 	    ("Release called, but xenisrc still in use"));
-	evtchn_mask_port(isrc->xi_port);
-	evtchn_clear_port(isrc->xi_port);
+	mtx_lock(&xen_intr_isrc_lock);
+	if (isrc->xi_port < NR_EVENT_CHANNELS) {
+		evtchn_mask_port(isrc->xi_port);
+		evtchn_clear_port(isrc->xi_port);
+
+		/* Rebind port to CPU 0. */
+		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+		evtchn_cpu_unmask_port(0, isrc->xi_port);
 
-	/* Rebind port to CPU 0. */
-	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
-	evtchn_cpu_unmask_port(0, isrc->xi_port);
+		if (isrc->xi_close != 0) {
+			struct evtchn_close close = { .port = isrc->xi_port };
+			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+				panic("EVTCHNOP_close failed");
+		}
 
-	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
-		struct evtchn_close close = { .port = isrc->xi_port };
-		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-			panic("EVTCHNOP_close failed");
+		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
 	}
-
-	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
 
 	/* not reachable from xen_intr_port_to_isrc[], unlock */
 	mtx_unlock(&xen_intr_isrc_lock);
 
 	isrc->xi_cpu = 0;
-	isrc->xi_port = 0;
+	isrc->xi_port = ~0U;
 	isrc->xi_cookie = NULL;
 
 	/*
	 * Only when ->xi_type == EVTCHN_TYPE_UNBOUND is the isrc under control
@@ -447,6 +445,10 @@
 	isrc->xi_port = local_port;
 	refcount_init(&isrc->xi_refcount, 1);
 	mtx_lock(&xen_intr_isrc_lock);
+	if (xen_intr_port_to_isrc[isrc->xi_port] != NULL) {
+		xen_intr_port_to_isrc[isrc->xi_port]->xi_port = ~0U;
+		isrc->xi_cpu = xen_intr_port_to_isrc[isrc->xi_port]->xi_cpu;
+	}
 	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
 	mtx_unlock(&xen_intr_isrc_lock);
 
@@ -792,7 +794,7 @@
 		vector = first_evtchn_irq + isrc_idx;
 		isrc = (struct xenisrc *)intr_lookup_source(vector);
 		if (isrc != NULL) {
-			isrc->xi_port = 0;
+			isrc->xi_port = ~0U;
 			switch (isrc->xi_type) {
 			case EVTCHN_TYPE_IPI:
 				xen_rebind_ipi(isrc);
@@ -896,7 +898,7 @@
 
 	mtx_lock(&xen_intr_isrc_lock);
 	isrc = (struct xenisrc *)base_isrc;
-	if (!is_valid_evtchn(isrc->xi_port)) {
+	if (isrc->xi_port >= NR_EVENT_CHANNELS) {
 		mtx_unlock(&xen_intr_isrc_lock);
 		return (EINVAL);
 	}
@@ -1315,7 +1317,8 @@
 xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
 {
 
-	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
+	/* event channel 0 is reserved, >= NR_EVENT_CHANNELS is invalid */
+	if (port == 0 || port >= NR_EVENT_CHANNELS)
 		return (EINVAL);
 
 	if (handlep == NULL) {