sys/xen/xen_intr.c
(116 lines elided)
 */

DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
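A note on the initializers above: the event handler resumes its fairness scan one bit after the last one it serviced, so priming last_processed_l1i/l2i with LONG_BIT - 1 makes the very first scan wrap around and begin at bit 0. A minimal sketch of that wrap-around (my own model, not the driver's scan loop; LONG_BIT_MODEL stands in for the kernel's LONG_BIT):

#include <limits.h>	/* CHAR_BIT */

#define LONG_BIT_MODEL	(sizeof(long) * CHAR_BIT)	/* stand-in for LONG_BIT */

/* Resume a round-robin bit scan one position after `last`, wrapping at
 * the word size; with last == LONG_BIT_MODEL - 1 the scan starts at 0. */
static unsigned int
next_scan_bit(unsigned int last)
{
	return ((last + 1) % LONG_BIT_MODEL);
}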
-#define	XEN_INVALID_EVTCHN	0 /* Invalid event channel */
-#define	is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
	xen_arch_isrc_t	xi_arch;	/* @TOP -> *xi_arch=*xenisrc */
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	evtchn_port_t	xi_port;
	int		xi_virq;
	void		*xi_cookie;
	u_int		xi_close:1;	/* close on unbind? */
(85 lines elided)
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note This operation does not preclude reception of an event
 *       for this event channel on another CPU. To mask the
 *       event channel globally, use evtchn_mask().
 */
static inline void
-evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
+xen_intr_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_clear_bit(port, pcpu->evtchn_enabled);
}
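To make the \note above concrete, a hedged fragment contrasting the per-CPU filter with the global mask (cpu and port are placeholders; evtchn_mask_port() is the global primitive this file already uses):

/* Per-CPU filter only: this CPU stops servicing the port, but the
 * event can still be taken on whichever CPU the port is bound to. */
xen_intr_cpu_mask_port(cpu, port);

/* Global mask: no CPU services the port until it is unmasked. */
evtchn_mask_port(port);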
/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note This operation does not guarantee that event delivery
 *       is enabled for this event channel port. The port must
 *       also be globally enabled. See evtchn_unmask().
 */
static inline void
-evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
+xen_intr_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_set_bit(port, pcpu->evtchn_enabled);
}
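The relationship between the two mask levels can be summarized with a small model of the delivery predicate (an illustrative sketch, not the actual upcall scan; all names here are hypothetical): a port is serviced on a CPU only when it is pending, not globally masked, and set in that CPU's evtchn_enabled bitmap.

#include <limits.h>
#include <stdbool.h>

#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

/* Model: pending, global_mask and cpu_enabled are per-port bitmaps. */
static bool
port_deliverable(const unsigned long *pending,
    const unsigned long *global_mask, const unsigned long *cpu_enabled,
    unsigned int port)
{
	unsigned long bit = 1UL << (port % WORD_BITS);
	unsigned int word = port / WORD_BITS;

	return ((pending[word] & ~global_mask[word] & cpu_enabled[word] &
	    bit) != 0);
}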
/**
(66 lines elided; resumes in xen_intr_alloc_isrc(enum evtchn_type type))
	mtx_lock(&xen_intr_x86_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc != NULL) {
		mtx_unlock(&xen_intr_x86_lock);
		return (isrc);
	}

-	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
+	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("%s: Event channels exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}
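The '>' to '>=' change is an off-by-one fix: the auto-vector count doubles as the next table index, so with NR_EVENT_CHANNELS entries the last valid index is NR_EVENT_CHANNELS - 1, and allocation must already fail once the count equals the limit. A self-contained sketch of the corrected bound (names are mine, for illustration):

/* Sketch: count is both "how many allocated" and "next index"; the
 * old test (count > limit) would hand out index == limit, one past
 * the end of a limit-sized table. */
static int
sketch_alloc_index(unsigned int *count, unsigned int limit)
{
	if (*count >= limit)
		return (-1);		/* exhausted */
	return ((int)(*count)++);
}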
(26 lines elided; resumes in xen_intr_release_isrc(struct xenisrc *isrc))
	mtx_lock(&xen_intr_isrc_lock);
	KASSERT(isrc->xi_arch.xai_intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
-	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
-	evtchn_cpu_unmask_port(0, isrc->xi_port);
+	xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+	xen_intr_cpu_unmask_port(0, isrc->xi_port);

-	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
+	if (isrc->xi_close != 0 && isrc->xi_port < NR_EVENT_CHANNELS) {

ehem_freebsd_m5p.com: I need a double-check here. Right now event channel 0 is never used, so this extra test never fails. My belief is if event channel 0 did see service, its structure should follow the standard format and xi_close should be set appropriately rather than handling this as a special case.

		struct evtchn_close close = { .port = isrc->xi_port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;

	/* not reachable from xen_intr_port_to_isrc[], therefore unlock */
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_cpu = 0;
-	isrc->xi_port = 0;
+	isrc->xi_port = ~0;
	isrc->xi_cookie = NULL;
	mtx_lock(&xen_intr_x86_lock);
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	mtx_unlock(&xen_intr_x86_lock);
	return (0);
}
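The teardown now parks xi_port at ~0 instead of 0, which pairs with the new explicit range checks: 0 is a real (if reserved) channel number, while ~0 can never satisfy port < NR_EVENT_CHANNELS. A self-contained illustration (the 4096 limit is an assumption for the sketch, not the kernel's actual NR_EVENT_CHANNELS):

#include <assert.h>
#include <stdbool.h>

#define NR_EVENT_CHANNELS_MODEL	4096u	/* assumed size, for illustration */

static bool
port_valid(unsigned int port)
{
	return (port < NR_EVENT_CHANNELS_MODEL);
}

int
main(void)
{
	assert(port_valid(0));		/* 0 is an addressable channel... */
	assert(!port_valid(~0u));	/* ...~0 never passes the check. */
	return (0);
}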
/**
(32 lines elided; context: if (port_handlep == NULL) {)
		return (EINVAL);
	}

	isrc = xen_intr_alloc_isrc(type);
	if (isrc == NULL)
		return (ENOSPC);

	mtx_lock(&xen_intr_isrc_lock);
	isrc->xi_port = local_port;
-	xen_intr_port_to_isrc[local_port] = isrc;
+	xen_intr_port_to_isrc[isrc->xi_port] = isrc;

ehem_freebsd_m5p.com: This struck me as a recipe for future trouble. The moment isrc->xi_port is valid, that should always be used with xen_intr_port_to_isrc[].

	refcount_init(&isrc->xi_refcount, 1);
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handler */
	*port_handlep = xen_intr_handle_from_isrc(isrc);
#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
(453 lines elided; resumes in an #ifdef SMP region)
	u_int vcpu_id = PCPU_ID_GET(to_cpu);
	int error, masked;

	if (!xen_has_percpu_evtchn())
		return (EOPNOTSUPP);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
-	if (!is_valid_evtchn(isrc->xi_port)) {
+	if (isrc->xi_port >= NR_EVENT_CHANNELS) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
-		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+		xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
-		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
+		xen_intr_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
-			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
+			xen_intr_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
-			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
+			xen_intr_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
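The masked/unmask bookkeeping above is worth calling out: evtchn_test_and_set_mask() both masks the port and reports whether it was already masked, so the function restores the caller's prior state rather than unconditionally unmasking. In outline (using only the evtchn_* helpers already visible in this file):

/* Quiesce-while-reconfiguring pattern from the function above. */
masked = evtchn_test_and_set_mask(isrc->xi_port);	/* mask, save state */
/* ... safely update isrc->xi_cpu / per-CPU bitmaps, or hypercall ... */
if (masked == 0)				/* it was unmasked before, */
	evtchn_unmask_port(isrc->xi_port);	/* so unmask it again */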
(370 lines elided; resumes in xen_intr_add_handler(const char *name, driver_filter_t filter, ...))
	return (error);
}

int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

-	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
+	/* event channel 0 is reserved, >= NR_EVENT_CHANNELS is invalid */
+	if (port == 0 || port >= NR_EVENT_CHANNELS)
		return (EINVAL);

	if (handlep == NULL) {
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	if (xen_intr_port_to_isrc[port] == NULL) {
(74 lines elided to end of file)
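For completeness, a hedged caller-side sketch of the lookup entry point shown above (my_port is a placeholder; the error paths mirror the EINVAL returns in the function):

xen_intr_handle_t handle;
int error;

/* Translate a raw event channel number into the opaque handle the
 * rest of the xen_intr API consumes; fails for port 0, out-of-range
 * ports, and ports with no bound interrupt source. */
error = xen_intr_get_evtchn_from_port(my_port, &handle);
if (error != 0)
	return (error);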