Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/xen/xen_intr.c
Show First 20 Lines • Show All 65 Lines • ▼ Show 20 Lines | |||||
#include <dev/pci/pcivar.h> | #include <dev/pci/pcivar.h> | ||||
#ifdef DDB | #ifdef DDB | ||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
#endif | #endif | ||||
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services"); | static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services"); | ||||
/*
 * Size, in BITS, of the per-CPU event channel enabled set.
 *
 * The previous representation was "u_long evtchn_enabled[sizeof(u_long) * 8]",
 * i.e. (sizeof(u_long) * 8) words of (sizeof(u_long) * 8) bits each — one bit
 * per event channel port, sized to match the shared_info evtchn_pending[] /
 * evtchn_mask[] arrays (4096 ports on LP64).
 *
 * BITSET_DEFINE()'s second argument is a bit count, not a word count, so it
 * must be the full port count; defining it as just (sizeof(u_long) * 8) would
 * shrink the set to a single word, and BIT_SET/CLR_ATOMIC() on ports >=
 * LONG_BIT — as well as __bits[idx] accesses in xen_intr_active_ports() —
 * would touch memory outside the set.
 */
#define	ENABLED_SETSIZE	(sizeof(u_long) * 8 * sizeof(u_long) * 8)
BITSET_DEFINE(enabledbits, ENABLED_SETSIZE);
/** | /** | ||||
* Per-cpu event channel processing state. | * Per-cpu event channel processing state. | ||||
*/ | */ | ||||
struct xen_intr_pcpu_data { | struct xen_intr_pcpu_data { | ||||
/** | /** | ||||
* The last event channel bitmap section (level one bit) processed. | * The last event channel bitmap section (level one bit) processed. | ||||
* This is used to ensure we scan all ports before | * This is used to ensure we scan all ports before | ||||
* servicing an already servied port again. | * servicing an already servied port again. | ||||
*/ | */ | ||||
u_int last_processed_l1i; | u_int last_processed_l1i; | ||||
/** | /** | ||||
* The last event channel processed within the event channel | * The last event channel processed within the event channel | ||||
* bitmap being scanned. | * bitmap being scanned. | ||||
*/ | */ | ||||
u_int last_processed_l2i; | u_int last_processed_l2i; | ||||
/** Pointer to this CPU's interrupt statistic counter. */ | /** Pointer to this CPU's interrupt statistic counter. */ | ||||
u_long *evtchn_intrcnt; | u_long *evtchn_intrcnt; | ||||
/** | /** | ||||
* A bitmap of ports that can be serviced from this CPU. | * A bitmap of ports that can be serviced from this CPU. | ||||
* A set bit means interrupt handling is enabled. | * A set bit means interrupt handling is enabled. | ||||
*/ | */ | ||||
u_long evtchn_enabled[sizeof(u_long) * 8]; | struct enabledbits evtchn_enabled; | ||||
}; | }; | ||||
/* | /* | ||||
* Start the scan at port 0 by initializing the last scanned | * Start the scan at port 0 by initializing the last scanned | ||||
* location as the highest numbered event channel port. | * location as the highest numbered event channel port. | ||||
*/ | */ | ||||
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = { | static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = { | ||||
.last_processed_l1i = LONG_BIT - 1, | .last_processed_l1i = LONG_BIT - 1, | ||||
.last_processed_l2i = LONG_BIT - 1 | .last_processed_l2i = LONG_BIT - 1 | ||||
}; | }; | ||||
DPCPU_DECLARE(struct vcpu_info *, vcpu_info); | DPCPU_DECLARE(struct vcpu_info *, vcpu_info); | ||||
#define XEN_EEXIST 17 /* Xen "already exists" error */ | #define XEN_EEXIST 17 /* Xen "already exists" error */ | ||||
#define XEN_ALLOCATE_VECTOR 0 /* Allocate a vector for this event channel */ | #define XEN_ALLOCATE_VECTOR 0 /* Allocate a vector for this event channel */ | ||||
▲ Show 20 Lines • Show All 93 Lines • ▼ Show 20 Lines | |||||
* event channel globally, use evtchn_mask(). | * event channel globally, use evtchn_mask(). | ||||
*/ | */ | ||||
static inline void | static inline void | ||||
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port) | evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port) | ||||
{ | { | ||||
struct xen_intr_pcpu_data *pcpu; | struct xen_intr_pcpu_data *pcpu; | ||||
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu); | ||||
clear_bit(port, pcpu->evtchn_enabled); | BIT_CLR_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled); | ||||
} | } | ||||
/** | /** | ||||
* Enable signal delivery for an event channel port on the | * Enable signal delivery for an event channel port on the | ||||
* specified CPU. | * specified CPU. | ||||
* | * | ||||
* \param port The event channel port to unmask. | * \param port The event channel port to unmask. | ||||
* | * | ||||
* This API is used to manage the port<=>CPU binding of event | * This API is used to manage the port<=>CPU binding of event | ||||
* channel handlers. | * channel handlers. | ||||
* | * | ||||
* \note This operation does not guarantee that event delivery | * \note This operation does not guarantee that event delivery | ||||
* is enabled for this event channel port. The port must | * is enabled for this event channel port. The port must | ||||
* also be globally enabled. See evtchn_unmask(). | * also be globally enabled. See evtchn_unmask(). | ||||
*/ | */ | ||||
static inline void | static inline void | ||||
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port) | evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port) | ||||
{ | { | ||||
struct xen_intr_pcpu_data *pcpu; | struct xen_intr_pcpu_data *pcpu; | ||||
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu); | ||||
set_bit(port, pcpu->evtchn_enabled); | BIT_SET_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled); | ||||
} | } | ||||
/** | /** | ||||
* Allocate and register a per-cpu Xen upcall interrupt counter. | * Allocate and register a per-cpu Xen upcall interrupt counter. | ||||
* | * | ||||
* \param cpu The cpu for which to register this interrupt count. | * \param cpu The cpu for which to register this interrupt count. | ||||
*/ | */ | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 247 Lines • ▼ Show 20 Lines | |||||
* events. | * events. | ||||
*/ | */ | ||||
static inline u_long | static inline u_long | ||||
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh, | xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh, | ||||
u_int idx) | u_int idx) | ||||
{ | { | ||||
return (sh->evtchn_pending[idx] | return (sh->evtchn_pending[idx] | ||||
& ~sh->evtchn_mask[idx] | & ~sh->evtchn_mask[idx] | ||||
& pcpu->evtchn_enabled[idx]); | & pcpu->evtchn_enabled.__bits[idx]); | ||||
royger: I don't like accessing `__bits` directly, but I cannot find a macro or function to get this. | |||||
} | } | ||||
/** | /** | ||||
* Interrupt handler for processing all Xen event channel events. | * Interrupt handler for processing all Xen event channel events. | ||||
* | * | ||||
* \param trap_frame The trap frame context for the current interrupt. | * \param trap_frame The trap frame context for the current interrupt. | ||||
*/ | */ | ||||
void | void | ||||
▲ Show 20 Lines • Show All 119 Lines • ▼ Show 20 Lines | xen_intr_init(void *dummy __unused) | ||||
* Register interrupt count manually as we aren't | * Register interrupt count manually as we aren't | ||||
* guaranteed to see a call to xen_intr_assign_cpu() | * guaranteed to see a call to xen_intr_assign_cpu() | ||||
* before our first interrupt. Also set the per-cpu | * before our first interrupt. Also set the per-cpu | ||||
* mask of CPU#0 to enable all, since by default | * mask of CPU#0 to enable all, since by default | ||||
* all event channels are bound to CPU#0. | * all event channels are bound to CPU#0. | ||||
*/ | */ | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | ||||
memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0, | if (i == 0) | ||||
sizeof(pcpu->evtchn_enabled)); | BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled); | ||||
else | |||||
BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled); | |||||
xen_intr_intrcnt_add(i); | xen_intr_intrcnt_add(i); | ||||
} | } | ||||
for (i = 0; i < nitems(s->evtchn_mask); i++) | for (i = 0; i < nitems(s->evtchn_mask); i++) | ||||
atomic_store_rel_long(&s->evtchn_mask[i], ~0); | atomic_store_rel_long(&s->evtchn_mask[i], ~0); | ||||
/* Try to register PIRQ EOI map */ | /* Try to register PIRQ EOI map */ | ||||
xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO); | xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO); | ||||
▲ Show 20 Lines • Show All 96 Lines • ▼ Show 20 Lines | xen_intr_resume(struct pic *unused, bool suspend_cancelled) | ||||
if (suspend_cancelled) | if (suspend_cancelled) | ||||
return; | return; | ||||
/* Reset the per-CPU masks */ | /* Reset the per-CPU masks */ | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
struct xen_intr_pcpu_data *pcpu; | struct xen_intr_pcpu_data *pcpu; | ||||
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | ||||
memset(pcpu->evtchn_enabled, | |||||
i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled)); | if (i == 0) | ||||
BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled); | |||||
else | |||||
BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled); | |||||
} | } | ||||
/* Mask all event channels. */ | /* Mask all event channels. */ | ||||
for (i = 0; i < nitems(s->evtchn_mask); i++) | for (i = 0; i < nitems(s->evtchn_mask); i++) | ||||
atomic_store_rel_long(&s->evtchn_mask[i], ~0); | atomic_store_rel_long(&s->evtchn_mask[i], ~0); | ||||
/* Remove port -> isrc mappings */ | /* Remove port -> isrc mappings */ | ||||
memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc)); | memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc)); | ||||
▲ Show 20 Lines • Show All 267 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
xen_intr_pirq_eoi_source(struct intsrc *base_isrc) | xen_intr_pirq_eoi_source(struct intsrc *base_isrc) | ||||
{ | { | ||||
struct xenisrc *isrc; | struct xenisrc *isrc; | ||||
int error; | int error; | ||||
isrc = (struct xenisrc *)base_isrc; | isrc = (struct xenisrc *)base_isrc; | ||||
if (test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) { | if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) { | ||||
struct physdev_eoi eoi = { .irq = isrc->xi_pirq }; | struct physdev_eoi eoi = { .irq = isrc->xi_pirq }; | ||||
error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||||
if (error != 0) | if (error != 0) | ||||
panic("Unable to EOI PIRQ#%d: %d\n", | panic("Unable to EOI PIRQ#%d: %d\n", | ||||
isrc->xi_pirq, error); | isrc->xi_pirq, error); | ||||
} | } | ||||
} | } | ||||
Show All 20 Lines | if (!xen_intr_pirq_eoi_map_enabled) { | ||||
if (error) | if (error) | ||||
panic("unable to get status of IRQ#%d", isrc->xi_pirq); | panic("unable to get status of IRQ#%d", isrc->xi_pirq); | ||||
if (irq_status.flags & XENIRQSTAT_needs_eoi) { | if (irq_status.flags & XENIRQSTAT_needs_eoi) { | ||||
/* | /* | ||||
* Since the dynamic PIRQ EOI map is not available | * Since the dynamic PIRQ EOI map is not available | ||||
* mark the PIRQ as needing EOI unconditionally. | * mark the PIRQ as needing EOI unconditionally. | ||||
*/ | */ | ||||
set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map); | xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map); | ||||
} | } | ||||
} | } | ||||
bind_pirq.pirq = isrc->xi_pirq; | bind_pirq.pirq = isrc->xi_pirq; | ||||
bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE; | bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE; | ||||
error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); | error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); | ||||
if (error) | if (error) | ||||
panic("unable to bind IRQ#%d", isrc->xi_pirq); | panic("unable to bind IRQ#%d", isrc->xi_pirq); | ||||
▲ Show 20 Lines • Show All 504 Lines • ▼ Show 20 Lines | xen_intr_dump_port(struct xenisrc *isrc) | ||||
int i; | int i; | ||||
db_printf("Port %d Type: %s\n", | db_printf("Port %d Type: %s\n", | ||||
isrc->xi_port, xen_intr_print_type(isrc->xi_type)); | isrc->xi_port, xen_intr_print_type(isrc->xi_type)); | ||||
if (isrc->xi_type == EVTCHN_TYPE_PIRQ) { | if (isrc->xi_type == EVTCHN_TYPE_PIRQ) { | ||||
db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d " | db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d " | ||||
"NeedsEOI: %d\n", | "NeedsEOI: %d\n", | ||||
isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger, | isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger, | ||||
!!test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)); | !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)); | ||||
} | } | ||||
if (isrc->xi_type == EVTCHN_TYPE_VIRQ) | if (isrc->xi_type == EVTCHN_TYPE_VIRQ) | ||||
db_printf("\tVirq: %d\n", isrc->xi_virq); | db_printf("\tVirq: %d\n", isrc->xi_virq); | ||||
db_printf("\tMasked: %d Pending: %d\n", | db_printf("\tMasked: %d Pending: %d\n", | ||||
!!test_bit(isrc->xi_port, &s->evtchn_mask[0]), | !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]), | ||||
!!test_bit(isrc->xi_port, &s->evtchn_pending[0])); | !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0])); | ||||
db_printf("\tPer-CPU Masks: "); | db_printf("\tPer-CPU Masks: "); | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu); | ||||
db_printf("cpu#%d: %d ", i, | db_printf("cpu#%d: %d ", i, | ||||
!!test_bit(isrc->xi_port, pcpu->evtchn_enabled)); | BIT_ISSET(ENABLED_SETSIZE, isrc->xi_port, | ||||
&pcpu->evtchn_enabled)); | |||||
} | } | ||||
db_printf("\n"); | db_printf("\n"); | ||||
} | } | ||||
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn) | DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn) | ||||
{ | { | ||||
int i; | int i; | ||||
Show All 16 Lines |
I don't like accessing __bits directly, but I cannot find a macro or function to get this. Also all the BIT_* macros access __bits directly, so I guess it's fine :/.