Index: sys/xen/xen_intr.c
===================================================================
--- sys/xen/xen_intr.c
+++ sys/xen/xen_intr.c
@@ -116,17 +116,21 @@
 #define	is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)
 
+struct xen_arch_isrc {
+	struct intsrc	xai_intsrc;	/* @TOP -> *xen_arch_isrc */
+	int		xai_vector;	/* Global isrc vector number */
+};
+
+typedef struct xen_arch_isrc xen_arch_isrc_t;
+
 struct xenisrc {
-	struct intsrc	xi_intsrc;
+	xen_arch_isrc_t	xi_arch;	/* @TOP -> *xi_arch=*xenisrc */
 	enum evtchn_type xi_type;
 	int		xi_cpu;		/* VCPU for delivery. */
-	int		xi_vector;	/* Global isrc vector number. */
 	evtchn_port_t	xi_port;
 	int		xi_virq;
 	void		*xi_cookie;
 	u_int		xi_close:1;	/* close on unbind? */
-	u_int		xi_activehi:1;
-	u_int		xi_edgetrigger:1;
 	u_int		xi_masked:1;
 	volatile u_int	xi_refcount;
 };
 
@@ -285,7 +289,7 @@
 		isrc = (struct xenisrc *)intr_lookup_source(vector);
 		if (isrc != NULL && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
-			KASSERT(isrc->xi_intsrc.is_handlers == 0,
+			KASSERT(isrc->xi_arch.xai_intsrc.is_handlers == 0,
 			    ("Free evtchn still has handlers"));
 			isrc->xi_type = type;
 			return (isrc);
 		}
@@ -327,10 +331,10 @@
 	mtx_unlock(&xen_intr_isrc_lock);
 	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
-	isrc->xi_intsrc.is_pic = &xen_intr_pic;
-	isrc->xi_vector = vector;
+	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
+	isrc->xi_arch.xai_vector = vector;
 	isrc->xi_type = type;
-	intr_register_source(&isrc->xi_intsrc);
+	intr_register_source(&isrc->xi_arch.xai_intsrc);
 	mtx_lock(&xen_intr_isrc_lock);
 
 	return (isrc);
 }
@@ -348,7 +352,7 @@
 {
 
 	mtx_lock(&xen_intr_isrc_lock);
-	KASSERT(isrc->xi_intsrc.is_handlers == 0,
+	KASSERT(isrc->xi_arch.xai_intsrc.is_handlers == 0,
 	    ("Release called, but xenisrc still in use"));
 	evtchn_mask_port(isrc->xi_port);
 	evtchn_clear_port(isrc->xi_port);
@@ -432,7 +436,7 @@
 		 * unless specified otherwise, so shuffle them to balance
 		 * the interrupt load.
 		 */
-		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu(0));
+		xen_intr_assign_cpu(&isrc->xi_arch.xai_intsrc, intr_next_cpu(0));
 	}
 #endif
 
@@ -571,7 +575,7 @@
 		    ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
 		    PCPU_GET(cpuid), isrc->xi_cpu));
 
-		intr_execute_handlers(&isrc->xi_intsrc, trap_frame);
+		intr_execute_handlers(&isrc->xi_arch.xai_intsrc, trap_frame);
 
 		/*
 		 * If this is the final port processed,
@@ -731,7 +735,7 @@
 	isrc->xi_cpu = 0;
 
 #ifdef SMP
-	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
+	error = xen_intr_assign_cpu(&isrc->xi_arch.xai_intsrc,
 	    cpu_apic_ids[cpu]);
 	if (error)
 		panic("unable to bind xen intr#%d to CPU#%d: %d",
@@ -826,7 +830,7 @@
 {
 	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
 
-	return (isrc->xi_vector);
+	return (isrc->xi_arch.xai_vector);
 }
 
 /**
@@ -1124,7 +1128,7 @@
 
 #ifdef SMP
 	if (error == 0)
-		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
+		error = intr_event_bind(isrc->xi_arch.xai_intsrc.is_event, cpu);
 #endif
 
 	if (error != 0) {
@@ -1144,7 +1148,7 @@
 		 * masks manually so events can't fire on the wrong cpu
 		 * during AP startup.
 		 */
-		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
+		xen_intr_assign_cpu(&isrc->xi_arch.xai_intsrc, cpu_apic_ids[cpu]);
 	}
 #endif
 
@@ -1201,7 +1205,7 @@
 	 * masks manually so events can't fire on the wrong cpu
 	 * during AP startup.
 	 */
-	xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
+	xen_intr_assign_cpu(&isrc->xi_arch.xai_intsrc, cpu_apic_ids[cpu]);
 }
 
 /*
@@ -1230,7 +1234,7 @@
 	va_start(ap, fmt);
 	vsnprintf(descr, sizeof(descr), fmt, ap);
 	va_end(ap);
-	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
+	return (intr_describe(isrc->xi_arch.xai_vector, isrc->xi_cookie, descr));
 }
 
 void
@@ -1297,7 +1301,7 @@
 	if (isrc == NULL || isrc->xi_cookie != NULL)
 		return (EINVAL);
 
-	error = intr_add_handler(name, isrc->xi_vector,filter, handler, arg,
+	error = intr_add_handler(name, isrc->xi_arch.xai_vector,filter, handler, arg,
 	    flags|INTR_EXCL, &isrc->xi_cookie, 0);
 	if (error != 0)
 		printf("%s: %s: add handler failed: %d\n", name, __func__,
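
Note for reviewers: the renames in the hunks above are mechanical, but they all rest on the layout rule flagged by the "@TOP" comments: xai_intsrc must stay the first member of xen_arch_isrc_t, and xi_arch the first member of struct xenisrc, so the struct intsrc * handed to the machine-independent interrupt code can be cast straight back to struct xenisrc * (as xen_intr_vector() and the intr_lookup_source() call sites do after the patch). The following is a minimal stand-alone user-space sketch of that rule, not kernel code; the member lists, the stand-in struct intsrc, and the vector_of() helper are simplified placeholders, and the _Static_asserts are only an illustration of how the invariant could be checked, not part of this patch.

#include <stddef.h>
#include <stdio.h>

struct intsrc {				/* stand-in for x86's struct intsrc */
	int	is_handlers;
};

struct xen_arch_isrc {
	struct intsrc	xai_intsrc;	/* must stay first ("@TOP") */
	int		xai_vector;
};

typedef struct xen_arch_isrc xen_arch_isrc_t;

struct xenisrc {
	xen_arch_isrc_t	xi_arch;	/* must stay first ("@TOP") */
	int		xi_type;
	int		xi_cpu;
};

/* The casts in the patch are only valid while both offsets are zero. */
_Static_assert(offsetof(struct xenisrc, xi_arch) == 0, "xi_arch must be first");
_Static_assert(offsetof(xen_arch_isrc_t, xai_intsrc) == 0, "xai_intsrc must be first");

/* Mirrors the post-patch xen_intr_vector(): generic pointer cast back to xenisrc. */
static int
vector_of(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_arch.xai_vector);
}

int
main(void)
{
	struct xenisrc isrc = { .xi_arch = { .xai_vector = 42 } };

	/* Round-trip through the generic type, as the interrupt core does. */
	printf("vector %d\n", vector_of(&isrc.xi_arch.xai_intsrc));
	return (0);
}

Keeping the arch-specific container at offset zero avoids a container_of()-style macro at every cast site; the trade-off is that the rule is enforced only by convention in the patch itself, which is why the "@TOP" comments are worth keeping next to both members.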