Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/xen/xen_arch_intr.c
Show First 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | |||||
#include <machine/intr_machdep.h> | #include <machine/intr_machdep.h> | ||||
#include <x86/apicvar.h> | #include <x86/apicvar.h> | ||||
#include <xen/xen-os.h> | #include <xen/xen-os.h> | ||||
#include <xen/xen_intr.h> | #include <xen/xen_intr.h> | ||||
#include <xen/arch-intr.h> | #include <xen/arch-intr.h> | ||||
/*
 * Lock for x86-related structures.  Notably, modifying
 * xen_intr_auto_vector_count and allocating interrupts require this lock be
 * held.
 */
static struct mtx xen_intr_x86_lock;

/* First IRQ number reserved for event channels (set in xen_intr_alloc_irqs). */
static u_int first_evtchn_irq;

/* Count of event-channel interrupt sources allocated so far; protected by
 * xen_intr_x86_lock. */
static u_int xen_intr_auto_vector_count;
void | |||||
xen_intr_alloc_irqs(void) | |||||
{ | |||||
if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS) | |||||
panic("IRQ allocation overflow (num_msi_irqs too high?)"); | |||||
first_evtchn_irq = num_io_irqs; | |||||
num_io_irqs += NR_EVENT_CHANNELS; | |||||
} | |||||
/********************************* EVTCHN PIC ********************************/
static void | static void | ||||
xen_intr_pic_enable_source(struct intsrc *isrc) | xen_intr_pic_enable_source(struct intsrc *isrc) | ||||
{ | { | ||||
CTASSERT(offsetof(struct xenisrc, xi_arch.xai_intsrc) == 0); | CTASSERT(offsetof(struct xenisrc, xi_arch.xai_intsrc) == 0); | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | xen_intr_pic_assign_cpu(struct intsrc *isrc, u_int apic_id) | ||||
return (xen_intr_assign_cpu((struct xenisrc *)isrc, | return (xen_intr_assign_cpu((struct xenisrc *)isrc, | ||||
apic_cpuid(apic_id))); | apic_cpuid(apic_id))); | ||||
} | } | ||||
/** | /** | ||||
* PIC interface for all event channel port types except physical IRQs. | * PIC interface for all event channel port types except physical IRQs. | ||||
*/ | */ | ||||
struct pic xen_intr_pic = { | static struct pic xen_intr_pic = { | ||||
.pic_enable_source = xen_intr_pic_enable_source, | .pic_enable_source = xen_intr_pic_enable_source, | ||||
.pic_disable_source = xen_intr_pic_disable_source, | .pic_disable_source = xen_intr_pic_disable_source, | ||||
.pic_eoi_source = xen_intr_pic_eoi_source, | .pic_eoi_source = xen_intr_pic_eoi_source, | ||||
.pic_enable_intr = xen_intr_pic_enable_intr, | .pic_enable_intr = xen_intr_pic_enable_intr, | ||||
.pic_disable_intr = xen_intr_pic_disable_intr, | .pic_disable_intr = xen_intr_pic_disable_intr, | ||||
.pic_vector = xen_intr_pic_vector, | .pic_vector = xen_intr_pic_vector, | ||||
.pic_source_pending = xen_intr_pic_source_pending, | .pic_source_pending = xen_intr_pic_source_pending, | ||||
.pic_suspend = xen_intr_pic_suspend, | .pic_suspend = xen_intr_pic_suspend, | ||||
.pic_resume = xen_intr_pic_resume, | .pic_resume = xen_intr_pic_resume, | ||||
.pic_config_intr = xen_intr_pic_config_intr, | .pic_config_intr = xen_intr_pic_config_intr, | ||||
.pic_assign_cpu = xen_intr_pic_assign_cpu | .pic_assign_cpu = xen_intr_pic_assign_cpu | ||||
}; | }; | ||||
/****************************** ARCH wrappers ********************************/
/*
 * Architecture-level one-time setup: initialize the allocation lock and
 * register the event channel PIC with the x86 interrupt machinery.
 */
void
xen_arch_intr_init(void)
{

	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);
	intr_register_pic(&xen_intr_pic);
}
/** | |||||
* Search for an already allocated but currently unused Xen interrupt | |||||
* source object. | |||||
* | |||||
* \param type Restrict the search to interrupt sources of the given | |||||
* type. | |||||
* | |||||
* \return A pointer to a free Xen interrupt source object or NULL. | |||||
*/ | |||||
static struct xenisrc * | |||||
xen_intr_find_unused_isrc(enum evtchn_type type) | |||||
{ | |||||
int isrc_idx; | |||||
mtx_assert(&xen_intr_x86_lock, MA_OWNED); | |||||
for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx ++) { | |||||
struct xenisrc *isrc; | |||||
u_int vector; | |||||
vector = first_evtchn_irq + isrc_idx; | |||||
isrc = (struct xenisrc *)intr_lookup_source(vector); | |||||
if (isrc != NULL | |||||
&& isrc->xi_type == EVTCHN_TYPE_UNBOUND) { | |||||
KASSERT(xen_arch_intr_has_handlers(isrc), | |||||
("Free evtchn still has handlers")); | |||||
isrc->xi_type = type; | |||||
return (isrc); | |||||
} | |||||
} | |||||
return (NULL); | |||||
} | |||||
/**
 * Allocate a Xen interrupt source object, recycling a previously allocated
 * but unbound one when possible.
 *
 * \param mtype  The malloc(9) type used for a fresh allocation.
 * \param name   Unused on x86.
 * \param type   The type of interrupt source to create.
 * \param port   The event channel port to record in the source.
 *
 * \return A pointer to a newly allocated Xen interrupt source
 *         object or NULL.
 */
struct xenisrc *
xen_arch_intr_alloc(struct malloc_type *mtype, const char *name __unused,
    enum evtchn_type type, evtchn_port_t port)
{
	static int warned;
	struct xenisrc *isrc;
	unsigned int vector;

	mtx_lock(&xen_intr_x86_lock);
	/* Prefer recycling an existing unbound source over allocating. */
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc != NULL) {
		mtx_unlock(&xen_intr_x86_lock);
		goto out;
	}

	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		/* Warn only once to avoid spamming the console. */
		if (!warned) {
			warned = 1;
			printf("%s: Xen interrupts exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}

	/* The vector is reserved the instant the counter is bumped ... */
	vector = first_evtchn_irq + xen_intr_auto_vector_count;
	xen_intr_auto_vector_count++;

	/* ... so the lock may be dropped before the M_WAITOK allocation. */
	mtx_unlock(&xen_intr_x86_lock);

	isrc = malloc(sizeof(*isrc), mtype, M_WAITOK | M_ZERO);
	isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic;
	isrc->xi_arch.xai_vector = vector;
	isrc->xi_type = type;
	if (intr_register_source(&isrc->xi_arch.xai_intsrc) != 0) {
		free(isrc, mtype);
		return (NULL);
	}

out:
	/* xen_intr_assign_cpu() requires this to be set */
	isrc->xi_port = port;
#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(isrc, intr_next_cpu(0));
	}
#endif

	return (isrc);
}
/**
 * Return an interrupt source to the pool of unbound sources.
 *
 * \param mtype  Unused on x86: sources are recycled by the allocator
 *               rather than freed here.
 * \param isrc   The interrupt source being released.
 */
void
xen_arch_intr_release(struct malloc_type *mtype __unused, struct xenisrc *isrc)
{

	isrc->xi_cpu = 0;
	isrc->xi_port = ~0;
	isrc->xi_cookie = NULL;

	/*
	 * Fun with locking here.  xen_intr_x86_lock is actually controlling
	 * *allocation*.  This means the isrc isn't under control of the lock
	 * until ->xi_type == EVTCHN_TYPE_UNBOUND.  The consequence is
	 * atomic_store_rel() is appropriate since we merely need the other
	 * stores to complete before this one.  This one simply needs to
	 * complete atomically.
	 */
	atomic_store_rel_32(&isrc->xi_type, EVTCHN_TYPE_UNBOUND);
}
/**
 * Report whether any interrupt handlers are currently attached to the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  true when at least one handler is installed.
 */
bool
xen_arch_intr_has_handlers(struct xenisrc *isrc)
{

	return (isrc->xi_arch.xai_intsrc.is_handlers != 0);
}
Show All 39 Lines |
Mentioned on D30726, should this segment be dropped? These values must be set during the allocation process, while the architecture code only looks at ->xi_type (== EVTCHN_TYPE_UNBOUND, presently unallocated, other values indicate in use). Though looks like xen_intr_bind_isrc() may need to start clearing ->xi_cookie (which it should have been doing already, D31188 implements this implicitly).