Changeset View
Standalone View
sys/x86/xen/xen_intr.c
Show First 20 Lines • Show All 114 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = { | DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = { | ||||
.last_processed_l1i = LONG_BIT - 1, | .last_processed_l1i = LONG_BIT - 1, | ||||
.last_processed_l2i = LONG_BIT - 1 | .last_processed_l2i = LONG_BIT - 1 | ||||
}; | }; | ||||
DPCPU_DECLARE(struct vcpu_info *, vcpu_info); | DPCPU_DECLARE(struct vcpu_info *, vcpu_info); | ||||
#define XEN_INVALID_EVTCHN 0 /* Invalid event channel */ | |||||
#define is_valid_evtchn(x) ((x) != XEN_INVALID_EVTCHN) | |||||
struct xenisrc { | struct xenisrc { | ||||
struct intsrc xi_intsrc; | struct intsrc xi_intsrc; | ||||
enum evtchn_type xi_type; | enum evtchn_type xi_type; | ||||
u_int xi_cpu; /* VCPU for delivery. */ | u_int xi_cpu; /* VCPU for delivery. */ | ||||
int xi_vector; /* Global isrc vector number. */ | int xi_vector; /* Global isrc vector number. */ | ||||
evtchn_port_t xi_port; | evtchn_port_t xi_port; | ||||
u_int xi_virq; | u_int xi_virq; | ||||
void *xi_cookie; | void *xi_cookie; | ||||
▲ Show 20 Lines • Show All 191 Lines • ▼ Show 20 Lines | xen_intr_alloc_isrc(enum evtchn_type type) | ||||
mtx_lock(&xen_intr_x86_lock); | mtx_lock(&xen_intr_x86_lock); | ||||
isrc = xen_intr_find_unused_isrc(type); | isrc = xen_intr_find_unused_isrc(type); | ||||
if (isrc != NULL) { | if (isrc != NULL) { | ||||
mtx_unlock(&xen_intr_x86_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
return (isrc); | return (isrc); | ||||
} | } | ||||
if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) { | if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) { | ||||
if (!warned) { | if (!warned) { | ||||
warned = 1; | warned = 1; | ||||
printf("%s: Event channels exhausted.\n", __func__); | printf("%s: Xen interrupts exhausted.\n", __func__); | ||||
} | } | ||||
mtx_unlock(&xen_intr_x86_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
vector = first_evtchn_irq + xen_intr_auto_vector_count; | vector = first_evtchn_irq + xen_intr_auto_vector_count; | ||||
xen_intr_auto_vector_count++; | xen_intr_auto_vector_count++; | ||||
Show All 22 Lines | |||||
*/ | */ | ||||
static int | static int | ||||
xen_intr_release_isrc(struct xenisrc *isrc) | xen_intr_release_isrc(struct xenisrc *isrc) | ||||
{ | { | ||||
mtx_lock(&xen_intr_isrc_lock); | mtx_lock(&xen_intr_isrc_lock); | ||||
KASSERT(isrc->xi_intsrc.is_handlers == 0, | KASSERT(isrc->xi_intsrc.is_handlers == 0, | ||||
("Release called, but xenisrc still in use")); | ("Release called, but xenisrc still in use")); | ||||
evtchn_mask_port(isrc->xi_port); | evtchn_mask_port(isrc->xi_port); | ||||
evtchn_clear_port(isrc->xi_port); | evtchn_clear_port(isrc->xi_port); | ||||
/* Rebind port to CPU 0. */ | /* Rebind port to CPU 0. */ | ||||
evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port); | evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port); | ||||
evtchn_cpu_unmask_port(0, isrc->xi_port); | evtchn_cpu_unmask_port(0, isrc->xi_port); | ||||
if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) { | if (isrc->xi_port < NR_EVENT_CHANNELS) { | ||||
ehem_freebsd_m5p.com: I //finally// figured out what was going on here. This is for EVTCHN_TYPE_PORT which can be… | |||||
Done Inline Actions^suspect^suspend ehem_freebsd_m5p.com: ^suspect^suspend
ie during suspend ->xi_port on these will be set invalid and may or may not be… | |||||
if (isrc->xi_close != 0) { | |||||
struct evtchn_close close = { .port = isrc->xi_port }; | struct evtchn_close close = { .port = isrc->xi_port }; | ||||
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) | ||||
panic("EVTCHNOP_close failed"); | panic("EVTCHNOP_close failed"); | ||||
} | } | ||||
/* another thread could have grabbed if caller already closed */ | |||||
if (xen_intr_port_to_isrc[isrc->xi_port] == isrc) | |||||
xen_intr_port_to_isrc[isrc->xi_port] = NULL; | xen_intr_port_to_isrc[isrc->xi_port] = NULL; | ||||
} | |||||
ehem_freebsd_m5p.comAuthorUnsubmitted Done Inline Actions I'm getting concerned about this entire section, due to the reason pointed out near the end. If a channel is closed before xen_intr_release_isrc() is called (entirely possible for a PORT channel), then the channel number could be reused by Xen before this section is called. That in turn would cause problems not just for this entire section, but for mask handling as well. In particular, xen_intr_bind_isrc() seems to assume the channel is masked and bound to vCPU 0. This suggests the entire section should be under an if (isrc->xi_port < NR_EVENT_CHANNELS && xen_intr_port_to_isrc[isrc->xi_port] == isrc) { test. ehem_freebsd_m5p.com: I'm getting concerned about this entire section, due to the reason pointed out near the… | |||||
/* not reachable from xen_intr_port_to_isrc[], therefore unlock */ | /* not reachable from xen_intr_port_to_isrc[], therefore unlock */ | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_isrc_lock); | ||||
Done Inline ActionsThis is actually a bug. If xi_port is cleared due to a suspend and never reset, then this line will clear an invalid element of xen_intr_port_to_isrc[]. Previously this hadn't caused problems since xi_port was being cleared to 0 (which is never allocated), but with ~0 now being used this will manifest. ehem_freebsd_m5p.com: This is actually a **bug**. If xi_port is cleared due to a suspend and never reset, then this… | |||||
isrc->xi_cpu = 0; | isrc->xi_cpu = 0; | ||||
isrc->xi_port = 0; | isrc->xi_port = ~0U; | ||||
isrc->xi_cookie = NULL; | isrc->xi_cookie = NULL; | ||||
/* | /* | ||||
* Fun with locking here. xen_intr_x86_lock is actually controlling | * Fun with locking here. xen_intr_x86_lock is actually controlling | ||||
* *allocation*. This means the isrc isn't under control of the lock | * *allocation*. This means the isrc isn't under control of the lock | ||||
* until ->xi_type == EVTCHN_TYPE_UNBOUND. The consequence is | * until ->xi_type == EVTCHN_TYPE_UNBOUND. The consequence is | ||||
* atomic_store_rel() is appropriate since we merely need the other | * atomic_store_rel() is appropriate since we merely need the other | ||||
* stores to complete before this one. This one simply needs to | * stores to complete before this one. This one simply needs to | ||||
* complete atomically. | * complete atomically. | ||||
Show All 38 Lines | if (port_handlep == NULL) { | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
isrc = xen_intr_alloc_isrc(type); | isrc = xen_intr_alloc_isrc(type); | ||||
if (isrc == NULL) | if (isrc == NULL) | ||||
return (ENOSPC); | return (ENOSPC); | ||||
mtx_lock(&xen_intr_isrc_lock); | mtx_lock(&xen_intr_isrc_lock); | ||||
isrc->xi_port = local_port; | isrc->xi_port = local_port; | ||||
xen_intr_port_to_isrc[local_port] = isrc; | xen_intr_port_to_isrc[isrc->xi_port] = isrc; | ||||
refcount_init(&isrc->xi_refcount, 1); | refcount_init(&isrc->xi_refcount, 1); | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_isrc_lock); | ||||
/* Assign the opaque handler */ | /* Assign the opaque handler */ | ||||
*port_handlep = xen_intr_handle_from_isrc(isrc); | *port_handlep = xen_intr_handle_from_isrc(isrc); | ||||
#ifdef SMP | #ifdef SMP | ||||
if (type == EVTCHN_TYPE_PORT) { | if (type == EVTCHN_TYPE_PORT) { | ||||
▲ Show 20 Lines • Show All 249 Lines • ▼ Show 20 Lines | #ifdef SMP | ||||
error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, | error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, | ||||
&bind_ipi); | &bind_ipi); | ||||
if (error != 0) | if (error != 0) | ||||
panic("unable to rebind xen IPI: %d", error); | panic("unable to rebind xen IPI: %d", error); | ||||
isrc->xi_port = bind_ipi.port; | isrc->xi_port = bind_ipi.port; | ||||
isrc->xi_cpu = 0; | isrc->xi_cpu = 0; | ||||
xen_intr_port_to_isrc[bind_ipi.port] = isrc; | xen_intr_port_to_isrc[isrc->xi_port] = isrc; | ||||
error = xen_intr_assign_cpu(&isrc->xi_intsrc, | error = xen_intr_assign_cpu(&isrc->xi_intsrc, | ||||
cpu_apic_ids[cpu]); | cpu_apic_ids[cpu]); | ||||
if (error) | if (error) | ||||
panic("unable to bind xen IPI to CPU#%d: %d", | panic("unable to bind xen IPI to CPU#%d: %d", | ||||
cpu, error); | cpu, error); | ||||
evtchn_unmask_port(bind_ipi.port); | evtchn_unmask_port(isrc->xi_port); | ||||
#else | #else | ||||
panic("Resume IPI event channel on UP"); | panic("Resume IPI event channel on UP"); | ||||
#endif | #endif | ||||
} | } | ||||
static void | static void | ||||
xen_rebind_virq(struct xenisrc *isrc) | xen_rebind_virq(struct xenisrc *isrc) | ||||
{ | { | ||||
u_int cpu = isrc->xi_cpu; | u_int cpu = isrc->xi_cpu; | ||||
u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id; | u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id; | ||||
int error; | int error; | ||||
struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq, | struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq, | ||||
.vcpu = vcpu_id }; | .vcpu = vcpu_id }; | ||||
error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | ||||
&bind_virq); | &bind_virq); | ||||
if (error != 0) | if (error != 0) | ||||
panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error); | panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error); | ||||
isrc->xi_port = bind_virq.port; | isrc->xi_port = bind_virq.port; | ||||
isrc->xi_cpu = 0; | isrc->xi_cpu = 0; | ||||
xen_intr_port_to_isrc[bind_virq.port] = isrc; | xen_intr_port_to_isrc[isrc->xi_port] = isrc; | ||||
#ifdef SMP | #ifdef SMP | ||||
error = xen_intr_assign_cpu(&isrc->xi_intsrc, | error = xen_intr_assign_cpu(&isrc->xi_intsrc, | ||||
cpu_apic_ids[cpu]); | cpu_apic_ids[cpu]); | ||||
if (error) | if (error) | ||||
panic("unable to bind xen VIRQ#%d to CPU#%d: %d", | panic("unable to bind xen VIRQ#%d to CPU#%d: %d", | ||||
isrc->xi_virq, cpu, error); | isrc->xi_virq, cpu, error); | ||||
#endif | #endif | ||||
evtchn_unmask_port(bind_virq.port); | evtchn_unmask_port(isrc->xi_port); | ||||
ehem_freebsd_m5p.comAuthorUnsubmitted Done Inline ActionsI'm unsure whether it is worthwhile including the preference for the use of isrc->xi_port over the misc port sources here. I kind of like clearing these out, but at the end of the day it might be best to leave these alone and let them disappear with D30598 (which merges these together and requires using isrc->xi_port) and potentially D31188 (which would nuke the one in xen_intr_bind_isrc()). Say the word and these hunks will be dropped. ehem_freebsd_m5p.com: I'm unsure whether it is worthwhile including the preference for the use of `isrc->xi_port`… | |||||
} | } | ||||
/** | /** | ||||
* Return this PIC to service after being suspended. | * Return this PIC to service after being suspended. | ||||
*/ | */ | ||||
static void | static void | ||||
xen_intr_resume(struct pic *unused, bool suspend_cancelled) | xen_intr_resume(struct pic *unused, bool suspend_cancelled) | ||||
{ | { | ||||
Show All 23 Lines | xen_intr_resume(struct pic *unused, bool suspend_cancelled) | ||||
/* Free unused isrcs and rebind VIRQs and IPIs */ | /* Free unused isrcs and rebind VIRQs and IPIs */ | ||||
for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) { | for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) { | ||||
u_int vector; | u_int vector; | ||||
vector = first_evtchn_irq + isrc_idx; | vector = first_evtchn_irq + isrc_idx; | ||||
isrc = (struct xenisrc *)intr_lookup_source(vector); | isrc = (struct xenisrc *)intr_lookup_source(vector); | ||||
if (isrc != NULL) { | if (isrc != NULL) { | ||||
isrc->xi_port = 0; | isrc->xi_port = ~0U; | ||||
switch (isrc->xi_type) { | switch (isrc->xi_type) { | ||||
case EVTCHN_TYPE_IPI: | case EVTCHN_TYPE_IPI: | ||||
xen_rebind_ipi(isrc); | xen_rebind_ipi(isrc); | ||||
break; | break; | ||||
case EVTCHN_TYPE_VIRQ: | case EVTCHN_TYPE_VIRQ: | ||||
xen_rebind_virq(isrc); | xen_rebind_virq(isrc); | ||||
break; | break; | ||||
default: | default: | ||||
▲ Show 20 Lines • Show All 87 Lines • ▼ Show 20 Lines | #ifdef SMP | ||||
if (!xen_has_percpu_evtchn()) | if (!xen_has_percpu_evtchn()) | ||||
return (EOPNOTSUPP); | return (EOPNOTSUPP); | ||||
to_cpu = apic_cpuid(apic_id); | to_cpu = apic_cpuid(apic_id); | ||||
vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id; | vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id; | ||||
mtx_lock(&xen_intr_isrc_lock); | mtx_lock(&xen_intr_isrc_lock); | ||||
isrc = (struct xenisrc *)base_isrc; | isrc = (struct xenisrc *)base_isrc; | ||||
if (!is_valid_evtchn(isrc->xi_port)) { | if (isrc->xi_port >= NR_EVENT_CHANNELS) { | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_isrc_lock); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
/* | /* | ||||
* Mask the event channel while binding it to prevent interrupt | * Mask the event channel while binding it to prevent interrupt | ||||
* delivery with an inconsistent state in isrc->xi_cpu. | * delivery with an inconsistent state in isrc->xi_cpu. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 402 Lines • ▼ Show 20 Lines | xen_intr_add_handler(const char *name, driver_filter_t filter, | ||||
return (error); | return (error); | ||||
} | } | ||||
int | int | ||||
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep) | xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep) | ||||
{ | { | ||||
if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS) | /* event channel 0 is reserved, >= NR_EVENT_CHANNELS is invalid */ | ||||
if (port == 0 || port >= NR_EVENT_CHANNELS) | |||||
return (EINVAL); | return (EINVAL); | ||||
if (handlep == NULL) { | if (handlep == NULL) { | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
mtx_lock(&xen_intr_isrc_lock); | mtx_lock(&xen_intr_isrc_lock); | ||||
if (xen_intr_port_to_isrc[port] == NULL) { | if (xen_intr_port_to_isrc[port] == NULL) { | ||||
▲ Show 20 Lines • Show All 74 Lines • Show Last 20 Lines |
I finally figured out what was going on here. This is for EVTCHN_TYPE_PORT, which can be marked to be closed, yet — due to not being rebound during suspend — can have an invalid port. This is an example of how mixing "reserved 0" with "invalid 0" can cause confusion.