Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/xen/xen_intr.c
Show First 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | |||||
#include <dev/pci/pcivar.h> | #include <dev/pci/pcivar.h> | ||||
#ifdef DDB | #ifdef DDB | ||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
#endif | #endif | ||||
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services"); | static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services"); | ||||
/* | |||||
 * Lock for x86-related structures. Notably, modifying | |||||
 * xen_intr_auto_vector_count and allocating interrupts require this lock to | |||||
 * be held. | |||||
*/ | |||||
static struct mtx xen_intr_x86_lock; | |||||
static u_int first_evtchn_irq; | static u_int first_evtchn_irq; | ||||
/** | /** | ||||
* Per-cpu event channel processing state. | * Per-cpu event channel processing state. | ||||
*/ | */ | ||||
struct xen_intr_pcpu_data { | struct xen_intr_pcpu_data { | ||||
/** | /** | ||||
* The last event channel bitmap section (level one bit) processed. | * The last event channel bitmap section (level one bit) processed. | ||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | struct pic xen_intr_pic = { | ||||
.pic_vector = xen_intr_vector, | .pic_vector = xen_intr_vector, | ||||
.pic_source_pending = xen_intr_source_pending, | .pic_source_pending = xen_intr_source_pending, | ||||
.pic_suspend = xen_intr_suspend, | .pic_suspend = xen_intr_suspend, | ||||
.pic_resume = xen_intr_resume, | .pic_resume = xen_intr_resume, | ||||
.pic_config_intr = xen_intr_config_intr, | .pic_config_intr = xen_intr_config_intr, | ||||
.pic_assign_cpu = xen_intr_assign_cpu | .pic_assign_cpu = xen_intr_assign_cpu | ||||
}; | }; | ||||
/* | |||||
* Lock for interrupt core data. | |||||
* | |||||
* Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former) | |||||
* requires this lock be held. Any time this lock is not held, the condition | |||||
 * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)` | |||||
 * MUST be true for all values of i which are valid indices of the array. | |||||
* | |||||
* Acquire/release operations for isrc->xi_refcount require this lock be held. | |||||
*/ | |||||
static struct mtx xen_intr_isrc_lock; | static struct mtx xen_intr_isrc_lock; | ||||
static u_int xen_intr_auto_vector_count; | static u_int xen_intr_auto_vector_count; | ||||
static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS]; | static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS]; | ||||
/*------------------------- Private Functions --------------------------------*/ | /*------------------------- Private Functions --------------------------------*/ | ||||
/** | /** | ||||
* Retrieve a handle for a Xen interrupt source. | * Retrieve a handle for a Xen interrupt source. | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* \return A pointer to a free Xen interrupt source object or NULL. | * \return A pointer to a free Xen interrupt source object or NULL. | ||||
*/ | */ | ||||
static struct xenisrc * | static struct xenisrc * | ||||
xen_intr_find_unused_isrc(enum evtchn_type type) | xen_intr_find_unused_isrc(enum evtchn_type type) | ||||
{ | { | ||||
u_int isrc_idx; | u_int isrc_idx; | ||||
KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held")); | mtx_assert(&xen_intr_x86_lock, MA_OWNED); | ||||
for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) { | for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) { | ||||
struct xenisrc *isrc; | struct xenisrc *isrc; | ||||
u_int vector; | u_int vector; | ||||
vector = first_evtchn_irq + isrc_idx; | vector = first_evtchn_irq + isrc_idx; | ||||
isrc = (struct xenisrc *)intr_lookup_source(vector); | isrc = (struct xenisrc *)intr_lookup_source(vector); | ||||
if (isrc != NULL | if (isrc != NULL | ||||
Show All 17 Lines | |||||
*/ | */ | ||||
static struct xenisrc * | static struct xenisrc * | ||||
xen_intr_alloc_isrc(enum evtchn_type type) | xen_intr_alloc_isrc(enum evtchn_type type) | ||||
{ | { | ||||
static int warned; | static int warned; | ||||
struct xenisrc *isrc; | struct xenisrc *isrc; | ||||
unsigned int vector; | unsigned int vector; | ||||
KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held")); | mtx_lock(&xen_intr_x86_lock); | ||||
isrc = xen_intr_find_unused_isrc(type); | |||||
if (isrc != NULL) { | |||||
mtx_unlock(&xen_intr_x86_lock); | |||||
return (isrc); | |||||
} | |||||
if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) { | if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) { | ||||
if (!warned) { | if (!warned) { | ||||
warned = 1; | warned = 1; | ||||
printf("%s: Event channels exhausted.\n", __func__); | printf("%s: Event channels exhausted.\n", __func__); | ||||
} | } | ||||
mtx_unlock(&xen_intr_x86_lock); | |||||
return (NULL); | return (NULL); | ||||
} | } | ||||
vector = first_evtchn_irq + xen_intr_auto_vector_count; | vector = first_evtchn_irq + xen_intr_auto_vector_count; | ||||
xen_intr_auto_vector_count++; | xen_intr_auto_vector_count++; | ||||
KASSERT((intr_lookup_source(vector) == NULL), | KASSERT((intr_lookup_source(vector) == NULL), | ||||
("Trying to use an already allocated vector")); | ("Trying to use an already allocated vector")); | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO); | isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO); | ||||
isrc->xi_intsrc.is_pic = &xen_intr_pic; | isrc->xi_intsrc.is_pic = &xen_intr_pic; | ||||
isrc->xi_vector = vector; | isrc->xi_vector = vector; | ||||
isrc->xi_type = type; | isrc->xi_type = type; | ||||
intr_register_source(&isrc->xi_intsrc); | intr_register_source(&isrc->xi_intsrc); | ||||
mtx_lock(&xen_intr_isrc_lock); | |||||
return (isrc); | return (isrc); | ||||
} | } | ||||
/** | /** | ||||
* Attempt to free an active Xen interrupt source object. | * Attempt to free an active Xen interrupt source object. | ||||
* | * | ||||
* \param isrc The interrupt source object to release. | * \param isrc The interrupt source object to release. | ||||
Show All 16 Lines | xen_intr_release_isrc(struct xenisrc *isrc) | ||||
if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) { | if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) { | ||||
struct evtchn_close close = { .port = isrc->xi_port }; | struct evtchn_close close = { .port = isrc->xi_port }; | ||||
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) | ||||
panic("EVTCHNOP_close failed"); | panic("EVTCHNOP_close failed"); | ||||
} | } | ||||
xen_intr_port_to_isrc[isrc->xi_port] = NULL; | xen_intr_port_to_isrc[isrc->xi_port] = NULL; | ||||
/* not reachable from xen_intr_port_to_isrc[], unlock */ | |||||
mtx_unlock(&xen_intr_isrc_lock); | |||||
isrc->xi_cpu = 0; | isrc->xi_cpu = 0; | ||||
isrc->xi_type = EVTCHN_TYPE_UNBOUND; | |||||
isrc->xi_port = 0; | isrc->xi_port = 0; | ||||
isrc->xi_cookie = NULL; | isrc->xi_cookie = NULL; | ||||
mtx_unlock(&xen_intr_isrc_lock); | /* | ||||
* Only when ->xi_type == EVTCHN_TYPE_UNBOUND is the isrc under control | |||||
* of xen_intr_x86_lock. | |||||
*/ | |||||
mtx_lock(&xen_intr_x86_lock); | |||||
isrc->xi_type = EVTCHN_TYPE_UNBOUND; | |||||
mtx_unlock(&xen_intr_x86_lock); | |||||
return (0); | return (0); | ||||
} | } | ||||
royger: I'm not a huge fan of doing this kind of tricks, as such open coded locking primitives often… | |||||
Done Inline ActionsYes, I was wondering about this. Problem is if you leave me waiting with time on my hands, sometimes code gets even more optimized... Can indeed simply be lock, isrc->xi_type = EVTCHN_TYPE_UNBOUND;, unlock. I've been wondering about whether it is worth clearing isrc here. The x86 architecture side only looks at ->xi_type and the other values should be cleared during allocation. ehem_freebsd_m5p.com: Yes, I was wondering about this. Problem is if you leave me waiting with time on my hands… | |||||
/** | /** | ||||
* Associate an interrupt handler with an already allocated local Xen | * Associate an interrupt handler with an already allocated local Xen | ||||
* event channel port. | * event channel port. | ||||
* | * | ||||
* \param isrcp The returned Xen interrupt object associated with | * \param isrcp The returned Xen interrupt object associated with | ||||
* the specified local port. | * the specified local port. | ||||
* \param local_port The event channel to bind. | * \param local_port The event channel to bind. | ||||
Show All 21 Lines | xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port, | ||||
int error; | int error; | ||||
*isrcp = NULL; | *isrcp = NULL; | ||||
if (port_handlep == NULL) { | if (port_handlep == NULL) { | ||||
printf("%s: %s: Bad event handle\n", intr_owner, __func__); | printf("%s: %s: Bad event handle\n", intr_owner, __func__); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
mtx_lock(&xen_intr_isrc_lock); | |||||
isrc = xen_intr_find_unused_isrc(type); | |||||
if (isrc == NULL) { | |||||
isrc = xen_intr_alloc_isrc(type); | isrc = xen_intr_alloc_isrc(type); | ||||
if (isrc == NULL) { | if (isrc == NULL) | ||||
mtx_unlock(&xen_intr_isrc_lock); | |||||
return (ENOSPC); | return (ENOSPC); | ||||
} | |||||
} | |||||
isrc->xi_port = local_port; | isrc->xi_port = local_port; | ||||
mtx_lock(&xen_intr_isrc_lock); | |||||
xen_intr_port_to_isrc[local_port] = isrc; | xen_intr_port_to_isrc[local_port] = isrc; | ||||
refcount_init(&isrc->xi_refcount, 1); | refcount_init(&isrc->xi_refcount, 1); | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_isrc_lock); | ||||
/* Assign the opaque handler */ | /* Assign the opaque handler */ | ||||
*port_handlep = xen_intr_handle_from_isrc(isrc); | *port_handlep = xen_intr_handle_from_isrc(isrc); | ||||
#ifdef SMP | #ifdef SMP | ||||
▲ Show 20 Lines • Show All 173 Lines • ▼ Show 20 Lines | xen_intr_init(void *dummy __unused) | ||||
shared_info_t *s = HYPERVISOR_shared_info; | shared_info_t *s = HYPERVISOR_shared_info; | ||||
struct xen_intr_pcpu_data *pcpu; | struct xen_intr_pcpu_data *pcpu; | ||||
u_int i; | u_int i; | ||||
if (!xen_domain()) | if (!xen_domain()) | ||||
return (0); | return (0); | ||||
mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF); | mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF); | ||||
mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF); | |||||
Not Done Inline Actionstable? xen-x86-irq-lock it's maybe better? royger: table? xen-x86-irq-lock it's maybe better? | |||||
Done Inline ActionsI was thinking of it being the lock for the x86 interrupt table. One can also think of it handling the Xen section of interrupt_sources[], so seemed like a table lock. No objection with renaming "xen-x86-irq-lock". This seems like an adjust when it gets into freebsd-src. ehem_freebsd_m5p.com: I was thinking of it being the lock for the x86 interrupt table. One can also think of it… | |||||
/* | /* | ||||
* Set the per-cpu mask of CPU#0 to enable all, since by default all | * Set the per-cpu mask of CPU#0 to enable all, since by default all | ||||
* event channels are bound to CPU#0. | * event channels are bound to CPU#0. | ||||
*/ | */ | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | ||||
memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0, | memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0, | ||||
▲ Show 20 Lines • Show All 753 Lines • Show Last 20 Lines |
I'm not a huge fan of doing this kind of tricks, as such open coded locking primitives often turn out to make the whole thing more fragile in the long term. Isn't there a way you could also hold xen_intr_x86_lock here, which is the one used by xen_intr_find_unused_isrc?