Changeset View
Standalone View
sys/xen/xen_intr.c
Show First 20 Lines • Show All 262 Lines • ▼ Show 20 Lines | |||||
* Allocate a Xen interrupt source object. | * Allocate a Xen interrupt source object. | ||||
* | * | ||||
* \param type The type of interrupt source to create. | * \param type The type of interrupt source to create. | ||||
* | * | ||||
* \return A pointer to a newly allocated Xen interrupt source | * \return A pointer to a newly allocated Xen interrupt source | ||||
* object or NULL. | * object or NULL. | ||||
*/ | */ | ||||
static struct xenisrc * | static struct xenisrc * | ||||
xen_intr_alloc_isrc(enum evtchn_type type) | xen_intr_alloc_isrc(enum evtchn_type type, evtchn_port_t port) | ||||
{ | { | ||||
static int warned; | static int warned; | ||||
struct xenisrc *isrc; | struct xenisrc *isrc; | ||||
unsigned int vector; | unsigned int vector; | ||||
mtx_lock(&xen_intr_x86_lock); | mtx_lock(&xen_intr_x86_lock); | ||||
isrc = xen_intr_find_unused_isrc(type); | isrc = xen_intr_find_unused_isrc(type); | ||||
if (isrc != NULL) { | if (isrc != NULL) { | ||||
mtx_unlock(&xen_intr_x86_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
return (isrc); | goto out; | ||||
} | } | ||||
if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) { | if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) { | ||||
if (!warned) { | if (!warned) { | ||||
warned = 1; | warned = 1; | ||||
printf("%s: Event channels exhausted.\n", __func__); | printf("%s: Event channels exhausted.\n", __func__); | ||||
} | } | ||||
mtx_unlock(&xen_intr_x86_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
vector = first_evtchn_irq + xen_intr_auto_vector_count; | vector = first_evtchn_irq + xen_intr_auto_vector_count; | ||||
xen_intr_auto_vector_count++; | xen_intr_auto_vector_count++; | ||||
KASSERT((intr_lookup_source(vector) == NULL), | KASSERT((intr_lookup_source(vector) == NULL), | ||||
("Trying to use an already allocated vector")); | ("Trying to use an already allocated vector")); | ||||
mtx_unlock(&xen_intr_x86_lock); | mtx_unlock(&xen_intr_x86_lock); | ||||
isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO); | isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO); | ||||
isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic; | isrc->xi_arch.xai_intsrc.is_pic = &xen_intr_pic; | ||||
isrc->xi_arch.xai_vector = vector; | isrc->xi_arch.xai_vector = vector; | ||||
isrc->xi_type = type; | isrc->xi_type = type; | ||||
intr_register_source(&isrc->xi_arch.xai_intsrc); | intr_register_source(&isrc->xi_arch.xai_intsrc); | ||||
out: | |||||
/* xen_intr_assign_cpu() requires this to be set */ | |||||
isrc->xi_port = port; | |||||
#ifdef SMP | |||||
if (type == EVTCHN_TYPE_PORT) { | |||||
/* | |||||
* By default all interrupts are assigned to vCPU#0 | |||||
* unless specified otherwise, so shuffle them to balance | |||||
* the interrupt load. | |||||
*/ | |||||
xen_intr_assign_cpu(isrc, intr_next_cpu(0)); | |||||
ehem_freebsd_m5p.com: Upon comparing with other places, it is possible to merely do `isrc->xi_cpu = intr_next_cpu(0)… | |||||
} | |||||
#endif | |||||
return (isrc); | return (isrc); | ||||
} | } | ||||
/** | /** | ||||
* Attempt to free an active Xen interrupt source object. | * Attempt to free an active Xen interrupt source object. | ||||
* | * | ||||
* \param isrc The interrupt source object to release. | * \param isrc The interrupt source object to release. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port, | ||||
int error; | int error; | ||||
*isrcp = NULL; | *isrcp = NULL; | ||||
if (port_handlep == NULL) { | if (port_handlep == NULL) { | ||||
printf("%s: %s: Bad event handle\n", intr_owner, __func__); | printf("%s: %s: Bad event handle\n", intr_owner, __func__); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
isrc = xen_intr_alloc_isrc(type); | isrc = xen_intr_alloc_isrc(type, local_port); | ||||
if (isrc == NULL) | if (isrc == NULL) | ||||
return (ENOSPC); | return (ENOSPC); | ||||
mtx_lock(&xen_intr_isrc_lock); | mtx_lock(&xen_intr_isrc_lock); | ||||
isrc->xi_port = local_port; | |||||
xen_intr_port_to_isrc[isrc->xi_port] = isrc; | xen_intr_port_to_isrc[isrc->xi_port] = isrc; | ||||
refcount_init(&isrc->xi_refcount, 1); | refcount_init(&isrc->xi_refcount, 1); | ||||
mtx_unlock(&xen_intr_isrc_lock); | mtx_unlock(&xen_intr_isrc_lock); | ||||
/* Assign the opaque handler */ | /* Assign the opaque handler */ | ||||
*port_handlep = xen_intr_handle_from_isrc(isrc); | *port_handlep = xen_intr_handle_from_isrc(isrc); | ||||
#ifdef SMP | |||||
if (type == EVTCHN_TYPE_PORT) { | |||||
/* | |||||
* By default all interrupts are assigned to vCPU#0 | |||||
* unless specified otherwise, so shuffle them to balance | |||||
* the interrupt load. | |||||
*/ | |||||
xen_intr_assign_cpu(isrc, intr_next_cpu(0)); | |||||
} | |||||
#endif | |||||
if (filter == NULL && handler == NULL) { | if (filter == NULL && handler == NULL) { | ||||
Done Inline Actionsnewline, and cpu can likely be unsigned. royger: newline, and cpu can likely be unsigned. | |||||
Done Inline Actions<insert cursing at the stupid goof> Yeah, that should be. ehem_freebsd_m5p.com: <insert cursing at the stupid goof> Yeah, that should be. | |||||
/* | /* | ||||
Done Inline ActionsWhy not make the call to intr_next_cpu here instead of stashing the value in xi_cpu and carrying it over from xen_intr_alloc_isrc? royger: Why not make the call to intr_next_cpu here instead of stashing the value in xi_cpu and… | |||||
Done Inline ActionsIf you look at diff #3, you'll see I was originally doing that. This approach though has two advantages:
ehem_freebsd_m5p.com: If you look at diff #3, you'll see I was originally doing that. This approach though has two… | |||||
Done Inline ActionsHmm, rereading I'm unsure I answered the right question. The previous is my reasoning for keeping the xen_intr_assign_cpu() call here, while moving the intr_next_cpu() call. The reason for moving the intr_next_cpu() call is in the main summary. Mainly intr_next_cpu() is x86-only. This isn't really an allocation task, but since xen_intr_alloc_isrc() is the portion being broken off, moving it there seems reasonable. ehem_freebsd_m5p.com: Hmm, rereading I'm unsure I answered the right question. The previous is my reasoning for… | |||||
Not Done Inline ActionsCould we maybe have something like xen_arch_intr_next_cpu() or similar, that is a per-arch function? On x86 it would be a wrapper around intr_next_cpu(), on Arm I'm not sure. royger: Could we maybe have something like xen_arch_intr_next_cpu() or similar, that is a per-arch… | |||||
Done Inline ActionsCertainly. The function for ARM is intr_irq_next_cpu() and matching the interface you're proposing would be simple to implement. I dislike an entire extra function call into the architecture side merely for setting one variable. Particularly when xen_arch_intr_alloc() can readily handle this. On the flip side, this would remove an access to a core variable by the architecture side. Presently I still favor setting isrc->xi_cpu, but almost any interface is possible. ehem_freebsd_m5p.com: Certainly. The function for ARM is `intr_irq_next_cpu()` and matching the interface you're… | |||||
Not Done Inline ActionsIf you have xen intr arch specific headers you could place this as a static inline function, in any case I'm not especially fussed by having an extra function call, at the end of the day this is not a hot path. I think the extra call is worth having in order to avoid the dance with setting xi_cpu in one function just so that another one can figure out which CPU the interrupt should be bound to. royger: If you have xen intr arch specific headers you could place this as a static inline function, in… | |||||
Done Inline ActionsI'm not that bothered by the idea of having a xen_arch_intr_next_cpu() function. Including processor balancing as part of xen_arch_intr_alloc() though appears much more flexible. Requiring an extra function in a header means more maintenance than simply including it as part of setup. It could be reasonable for an architecture to only set the vCPU on newly allocated sources, but leave it completely alone on reused sources. This would leave sources as balanced as they already are, as long as newly allocated sources are reasonably balanced. The interface you're proposing wouldn't allow this. ehem_freebsd_m5p.com: I'm not //that// bothered by the idea of having a `xen_arch_intr_next_cpu()` function. | |||||
Done Inline Actions(see D31690 for the present ARM64 implementation) ehem_freebsd_m5p.com: (see D31690 for the present ARM64 implementation) | |||||
* No filter/handler provided, leave the event channel | * No filter/handler provided, leave the event channel | ||||
* masked and without a valid handler, the caller is | * masked and without a valid handler, the caller is | ||||
* in charge of setting that up. | * in charge of setting that up. | ||||
*/ | */ | ||||
*isrcp = isrc; | *isrcp = isrc; | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 896 Lines • Show Last 20 Lines |
Upon comparing with other places, it is possible to merely do isrc->xi_cpu = intr_next_cpu(0); here, at which point the section in xen_intr_bind_isrc() can make the xen_intr_assign_cpu() call.