D30726.id98215.diff

Index: sys/x86/xen/xen_intr.c
===================================================================
--- sys/x86/xen/xen_intr.c
+++ sys/x86/xen/xen_intr.c
@@ -73,6 +73,13 @@
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
+/*
+ * Lock for x86-related structures. Notably, modifying
+ * xen_intr_auto_vector_count and allocating interrupts require this lock to
+ * be held.
+ */
+static struct mtx xen_intr_x86_lock;
+
static u_int first_evtchn_irq;
/**
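
As a concrete illustration of the rule stated in the new comment (a hypothetical
snippet, not part of this patch), any code that hands out a fresh auto-assigned
vector would bump the counter only while holding the new lock:

	/* Illustration only: allocate the next auto-assigned vector. */
	mtx_lock(&xen_intr_x86_lock);
	vector = first_evtchn_irq + xen_intr_auto_vector_count++;
	mtx_unlock(&xen_intr_x86_lock);
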
@@ -160,6 +167,18 @@
.pic_assign_cpu = xen_intr_assign_cpu
};
+/*
+ * Lock for interrupt core data.
+ *
+ * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
+ * requires this lock be held. Any time this lock is not held, the condition
+ * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
+ * MUST be true for all values of i that are valid indices of the array.
+ *
+ * Xen per-CPU port (un)masking operations require this lock be held.
+ *
+ * Acquire/release operations for isrc->xi_refcount require this lock be held.
+ */
static struct mtx xen_intr_isrc_lock;
static u_int xen_intr_auto_vector_count;
static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
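
The invariant stated above can be exercised directly; a hypothetical
INVARIANTS-style helper (illustration only, not part of this patch) would walk
the table and check that every populated slot points back at its own port:

	static void
	xen_intr_check_port_table(void)
	{
		u_int i;

		for (i = 0; i < NR_EVENT_CHANNELS; i++) {
			struct xenisrc *isrc = xen_intr_port_to_isrc[i];

			/* A populated slot must name its own index as port. */
			KASSERT(isrc == NULL || isrc->xi_port == i,
			    ("xen_intr_port_to_isrc[%u] names port %u", i,
			    isrc->xi_port));
		}
	}
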
@@ -274,7 +293,7 @@
{
u_int isrc_idx;
- KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));
+ mtx_assert(&xen_intr_x86_lock, MA_OWNED);
for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) {
struct xenisrc *isrc;
@@ -308,13 +327,20 @@
struct xenisrc *isrc;
unsigned int vector;
- KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));
+ mtx_lock(&xen_intr_x86_lock);
+ isrc = xen_intr_find_unused_isrc(type);
+
+ if (isrc != NULL) {
+ mtx_unlock(&xen_intr_x86_lock);
+ return (isrc);
+ }
if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
if (!warned) {
warned = 1;
printf("%s: Event channels exhausted.\n", __func__);
}
+ mtx_unlock(&xen_intr_x86_lock);
return (NULL);
}
@@ -324,13 +350,12 @@
KASSERT((intr_lookup_source(vector) == NULL),
("Trying to use an already allocated vector"));
- mtx_unlock(&xen_intr_isrc_lock);
+ mtx_unlock(&xen_intr_x86_lock);
isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
isrc->xi_intsrc.is_pic = &xen_intr_pic;
isrc->xi_vector = vector;
isrc->xi_type = type;
intr_register_source(&isrc->xi_intsrc);
- mtx_lock(&xen_intr_isrc_lock);
return (isrc);
}
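
One detail worth noting in the hunk above: the lock is dropped before the
M_WAITOK allocation, presumably because a default mtx(9) mutex must not be held
across an operation that may sleep. The pattern is simply:

	mtx_unlock(&xen_intr_x86_lock);
	/* malloc(M_WAITOK) may sleep; no non-sleepable lock is held here. */
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
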
@@ -363,11 +388,21 @@
}
xen_intr_port_to_isrc[isrc->xi_port] = NULL;
+ /* Not reachable from xen_intr_port_to_isrc[] any more, so drop the lock. */
+ mtx_unlock(&xen_intr_isrc_lock);
+
isrc->xi_cpu = 0;
- isrc->xi_type = EVTCHN_TYPE_UNBOUND;
isrc->xi_port = 0;
isrc->xi_cookie = NULL;
- mtx_unlock(&xen_intr_isrc_lock);
+ /*
+ * Fun with locking here: xen_intr_x86_lock actually protects *allocation*,
+ * which means this isrc is not under control of that lock until
+ * ->xi_type == EVTCHN_TYPE_UNBOUND. The consequence is that
+ * atomic_store_rel() is appropriate, since we merely need the earlier
+ * stores to complete before this one, and this one simply needs to
+ * complete atomically.
+ */
+ atomic_store_rel_32(&isrc->xi_type, EVTCHN_TYPE_UNBOUND);
return (0);
}
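
To make the ordering argument in that comment concrete, here is a minimal,
hypothetical producer/consumer pairing using FreeBSD's atomic(9) primitives.
It only illustrates release/acquire semantics and is not code from this file;
the real consumer is the allocation path above, which looks for
EVTCHN_TYPE_UNBOUND entries:

	/* Producer: publish the isrc as reusable only after its fields reset. */
	isrc->xi_port = 0;
	isrc->xi_cookie = NULL;
	atomic_store_rel_32(&isrc->xi_type, EVTCHN_TYPE_UNBOUND);

	/*
	 * Consumer: an acquire load that observes EVTCHN_TYPE_UNBOUND is
	 * guaranteed to also observe the earlier stores to xi_port and
	 * xi_cookie.
	 */
	if (atomic_load_acq_32(&isrc->xi_type) == EVTCHN_TYPE_UNBOUND) {
		/* Safe to reuse: xi_port and xi_cookie are already cleared. */
	}
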
@@ -407,15 +442,10 @@
return (EINVAL);
}
+ isrc = xen_intr_alloc_isrc(type);
+ if (isrc == NULL)
+ return (ENOSPC);
mtx_lock(&xen_intr_isrc_lock);
- isrc = xen_intr_find_unused_isrc(type);
- if (isrc == NULL) {
- isrc = xen_intr_alloc_isrc(type);
- if (isrc == NULL) {
- mtx_unlock(&xen_intr_isrc_lock);
- return (ENOSPC);
- }
- }
isrc->xi_port = local_port;
xen_intr_port_to_isrc[local_port] = isrc;
refcount_init(&isrc->xi_refcount, 1);
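
The refcount rule from the xen_intr_isrc_lock comment earlier would make the
reference-taking side look roughly like this (hypothetical sketch, not code
from this diff; "port" is an assumed local variable and refcount_acquire() is
the stock refcount(9) primitive):

	/* Illustration: take an extra reference on a bound isrc. */
	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_port_to_isrc[port];
	if (isrc != NULL)
		refcount_acquire(&isrc->xi_refcount);
	mtx_unlock(&xen_intr_isrc_lock);
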
@@ -607,6 +637,8 @@
mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
+ mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
+
/*
* Set the per-cpu mask of CPU#0 to enable all, since by default all
* event channels are bound to CPU#0.
