Index: sys/xen/xen_intr.c
===================================================================
--- sys/xen/xen_intr.c
+++ sys/xen/xen_intr.c
@@ -121,10 +121,6 @@
 DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
 
-#define	XEN_INVALID_EVTCHN	0 /* Invalid event channel */
-
-#define	is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)
-
 struct xenisrc {
 	xen_arch_isrc_t	xi_arch;	/* @TOP -> *xi_arch=*xenisrc */
 	enum evtchn_type xi_type;
@@ -226,7 +222,7 @@
  * event channel globally, use evtchn_mask().
  */
 static inline void
-evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
+xen_intr_cpu_mask_port(u_int cpu, evtchn_port_t port)
 {
 	struct xen_intr_pcpu_data *pcpu;
 
@@ -248,7 +244,7 @@
  * also be globally enabled. See evtchn_unmask().
  */
 static inline void
-evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
+xen_intr_cpu_unmask_port(u_int cpu, evtchn_port_t port)
 {
 	struct xen_intr_pcpu_data *pcpu;
 
@@ -331,7 +327,7 @@
 		return (isrc);
 	}
 
-	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
+	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
 		if (!warned) {
 			warned = 1;
 			printf("%s: Event channels exhausted.\n", __func__);
@@ -374,10 +370,10 @@
 	evtchn_clear_port(isrc->xi_port);
 
 	/* Rebind port to CPU 0. */
-	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
-	evtchn_cpu_unmask_port(0, isrc->xi_port);
+	xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+	xen_intr_cpu_unmask_port(0, isrc->xi_port);
 
-	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
+	if (isrc->xi_close != 0) {
 		struct evtchn_close close = { .port = isrc->xi_port };
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
 			panic("EVTCHNOP_close failed");
@@ -388,7 +384,7 @@
 	mtx_unlock(&xen_intr_isrc_lock);
 
 	isrc->xi_cpu = 0;
-	isrc->xi_port = 0;
+	isrc->xi_port = ~0;
 	isrc->xi_cookie = NULL;
 	mtx_lock(&xen_intr_x86_lock);
 	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
@@ -438,7 +434,7 @@
 		return (ENOSPC);
 
 	mtx_lock(&xen_intr_isrc_lock);
 	isrc->xi_port = local_port;
-	xen_intr_port_to_isrc[local_port] = isrc;
+	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
 	refcount_init(&isrc->xi_refcount, 1);
 	mtx_unlock(&xen_intr_isrc_lock);
@@ -738,7 +734,7 @@
 	int cpu = isrc->xi_cpu;
 	int error;
 
-	isrc->xi_port = 0;
+	isrc->xi_port = ~0;
 	switch (isrc->xi_type) {
 	case EVTCHN_TYPE_IPI:
 		xen_rebind_ipi(isrc);
@@ -917,7 +913,7 @@
 	mtx_lock(&xen_intr_isrc_lock);
 
 	isrc = (struct xenisrc *)base_isrc;
-	if (!is_valid_evtchn(isrc->xi_port)) {
+	if (isrc->xi_port >= NR_EVENT_CHANNELS) {
 		mtx_unlock(&xen_intr_isrc_lock);
 		return (EINVAL);
 	}
@@ -934,9 +930,9 @@
 		 * the Hypervisor at evtchn_bind_virq time, so
 		 * all we need to do is update the per-CPU masks.
 		 */
-		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+		xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
 		isrc->xi_cpu = to_cpu;
-		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
+		xen_intr_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
 		goto out;
 	}
 
@@ -947,9 +943,9 @@
 	if (isrc->xi_cpu != to_cpu) {
 		if (error == 0) {
 			/* Commit to new binding by removing the old one. */
-			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+			xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
 			isrc->xi_cpu = to_cpu;
-			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
+			xen_intr_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
 		}
 	}
 
@@ -1335,7 +1331,8 @@
 xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
 {
-	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
+	/* event channel 0 is reserved, >= NR_EVENT_CHANNELS is invalid */
+	if (port == 0 || port >= NR_EVENT_CHANNELS)
 		return (EINVAL);
 
 	if (handlep == NULL) {
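
Note on the new invalid-port convention: with XEN_INVALID_EVTCHN and
is_valid_evtchn() removed, an unbound isrc now carries xi_port = ~0, which
is out of range, so validity reduces to a single range check (with port 0
still rejected at the external entry point, since Xen reserves it).  The
sketch below is illustrative only and not part of the patch: port_is_valid()
is a hypothetical helper, and the NR_EVENT_CHANNELS value assumes the
2-level event channel ABI on a 64-bit build.

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t evtchn_port_t;
	#define NR_EVENT_CHANNELS 4096	/* assumed: 2-level ABI, 64-bit */

	/*
	 * Port 0 is reserved by Xen; ~0 (the new "unbound" marker) and
	 * anything past the event channel table are out of range.
	 */
	static inline bool
	port_is_valid(evtchn_port_t port)
	{
		return (port != 0 && port < NR_EVENT_CHANNELS);
	}

Previously port 0 did double duty as "reserved by Xen" and "unbound", which
is why the release path guarded EVTCHNOP_close with is_valid_evtchn(); the
patch drops that guard, relying on xi_close only being set while a real
port is bound.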