diff --git a/sys/amd64/amd64/intr_machdep.c b/sys/amd64/amd64/intr_machdep.c index 7efc7ace54ce..23d9278456cc 100644 --- a/sys/amd64/amd64/intr_machdep.c +++ b/sys/amd64/amd64/intr_machdep.c @@ -1,603 +1,646 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for amd64. For amd64, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. */ #include "opt_atpic.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifndef DEV_ATPIC #include #include #include #include #include #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct sx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; #ifdef INTR_FILTER static void intr_eoi_src(void *arg); static void intr_disab_eoi_src(void *arg); static void intr_event_stray(void *cookie); #endif #ifdef SMP static int assign_cpu; static void intr_assign_next_cpu(struct intsrc *isrc); #endif +static int intr_assign_cpu(void *arg, u_char cpu); static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; STAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. 
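 * For example, a hypothetical "foo" PIC driver would register itself once,
 * before registering any of its interrupt sources:
 *
 *	(void)intr_register_pic(&foo_pic);
 *
 * Registering the same pic a second time fails with EBUSY.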
*/ int intr_register_pic(struct pic *pic) { int error; sx_xlock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { STAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } sx_xunlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. */ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); #ifdef INTR_FILTER error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, - intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector); + intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:", + vector); #else error = intr_event_create(&isrc->is_event, isrc, 0, - (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector); + (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:", + vector); #endif if (error) return (error); sx_xlock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { sx_xunlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; sx_xunlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { sx_xlock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { #ifdef SMP if (assign_cpu) intr_assign_next_cpu(isrc); #endif isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } sx_xunlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; int error; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { sx_xlock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); sx_xunlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } #ifdef INTR_FILTER void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; int vector; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). 
*/ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; if (intr_event_handle(ie, frame) != 0) intr_event_stray(isrc); } static void intr_event_stray(void *cookie) { struct intsrc *isrc; isrc = cookie; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc)); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", isrc->is_pic->pic_vector(isrc)); } static void intr_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_eoi_source(isrc); } static void intr_disab_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } #else void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; struct intr_handler *ih; int error, vector, thread, ret; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); return; } /* * Execute fast interrupt handlers directly. * To support clock handlers, if a handler registers * with a NULL argument, then we pass it a pointer to * a trapframe as its argument. */ td->td_intr_nesting_level++; ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument == NULL ? frame : ih->ih_argument, ih->ih_name); if (ih->ih_argument == NULL) ret = ih->ih_filter(frame); else ret = ih->ih_filter(ih->ih_argument); /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } /* * If there are any threaded handlers that need to run, * mask the source as well as sending it an EOI. Otherwise, * just send it an EOI but leave it unmasked. */ if (thread) isrc->is_pic->pic_disable_source(isrc, PIC_EOI); else isrc->is_pic->pic_eoi_source(isrc); /* Schedule the ithread if needed. 
*/ if (thread) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("bad stray interrupt")); } critical_exit(); td->td_intr_nesting_level--; } #endif void intr_resume(void) { struct pic *pic; #ifndef DEV_ATPIC atpic_reset(); #endif sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic); } sx_xunlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } sx_xunlock(&intr_table_lock); } +static int +intr_assign_cpu(void *arg, u_char cpu) +{ +#ifdef SMP + struct intsrc *isrc; + + /* + * Don't do anything during early boot. We will pick up the + * assignment once the APs are started. + */ + if (assign_cpu && cpu != NOCPU) { + isrc = arg; + sx_xlock(&intr_table_lock); + isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); + sx_xunlock(&intr_table_lock); + } + return (0); +#else + return (EOPNOTSUPP); +#endif +} + static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; STAILQ_INIT(&pics); sx_init(&intr_table_lock, "intr sources"); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL) #ifndef DEV_ATPIC /* Initialize the two 8259A's to a known-good shutdown state. */ void atpic_reset(void) { outb(IO_ICU1, ICW1_RESET | ICW1_IC4); outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS); outb(IO_ICU1 + ICU_IMR_OFFSET, 1 << 2); outb(IO_ICU1 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU1, OCW3_SEL | OCW3_RR); outb(IO_ICU2, ICW1_RESET | ICW1_IC4); outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8); outb(IO_ICU2 + ICU_IMR_OFFSET, 2); outb(IO_ICU2 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU2, OCW3_SEL | OCW3_RR); } #endif #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ /* The BSP is always a valid target. 
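 * CPU 0's bit is hard-wired into the initial mask below; bits for the
 * other CPUs are set via intr_add_cpu() as they are brought online.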
*/ static cpumask_t intr_cpus = (1 << 0); static int current_cpu; static void intr_assign_next_cpu(struct intsrc *isrc) { - struct pic *pic; - u_int apic_id; /* * Assign this source to a local APIC in a round-robin fashion. */ - pic = isrc->is_pic; - apic_id = cpu_apic_ids[current_cpu]; - pic->pic_assign_cpu(isrc, apic_id); + isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]); do { current_cpu++; if (current_cpu > mp_maxid) current_cpu = 0; } while (!(intr_cpus & (1 << current_cpu))); } +/* Attempt to bind the specified IRQ to the specified CPU. */ +int +intr_bind(u_int vector, u_char cpu) +{ + struct intsrc *isrc; + + isrc = intr_lookup_source(vector); + if (isrc == NULL) + return (EINVAL); + return (intr_event_bind(isrc->is_event, cpu)); +} + /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); intr_cpus |= (1 << cpu); } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (mp_ncpus == 1) return; /* Round-robin assign a CPU to each enabled source. */ sx_xlock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; - if (isrc != NULL && isrc->is_handlers > 0) - intr_assign_next_cpu(isrc); + if (isrc != NULL && isrc->is_handlers > 0) { + /* + * If this event is already bound to a CPU, + * then assign the source to that CPU instead + * of picking one via round-robin. + */ + if (isrc->is_event->ie_cpu != NOCPU) + isrc->is_pic->pic_assign_cpu(isrc, + cpu_apic_ids[isrc->is_event->ie_cpu]); + else + intr_assign_next_cpu(isrc); + } } sx_xunlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL) #endif diff --git a/sys/amd64/include/intr_machdep.h b/sys/amd64/include/intr_machdep.h index 8b16c5e2fabc..7e1c43f65b90 100644 --- a/sys/amd64/include/intr_machdep.h +++ b/sys/amd64/include/intr_machdep.h @@ -1,160 +1,163 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __MACHINE_INTR_MACHDEP_H__ #define __MACHINE_INTR_MACHDEP_H__ #ifdef _KERNEL /* * The maximum number of I/O interrupts we allow. This number is rather * arbitrary as it is just the maximum IRQ resource value. The interrupt * source for a given IRQ maps that I/O interrupt to device interrupt * source whether it be a pin on an interrupt controller or an MSI interrupt. * The 16 ISA IRQs are assigned fixed IDT vectors, but all other device * interrupts allocate IDT vectors on demand. Currently we have 191 IDT * vectors available for device interrupts. On many systems with I/O APICs, * a lot of the IRQs are not used, so this number can be much larger than * 191 and still be safe since only interrupt sources in actual use will * allocate IDT vectors. * * The first 255 IRQs (0 - 254) are reserved for ISA IRQs and PCI intline IRQs. * IRQ values beyond 256 are used by MSI. We leave 255 unused to avoid * confusion since 255 is used in PCI to indicate an invalid IRQ. */ #define NUM_MSI_INTS 128 #define FIRST_MSI_INT 256 #define NUM_IO_INTS (FIRST_MSI_INT + NUM_MSI_INTS) /* * Default base address for MSI messages on x86 platforms. */ #define MSI_INTEL_ADDR_BASE 0xfee00000 /* * - 1 ??? dummy counter. * - 2 counters for each I/O interrupt. * - 1 counter for each CPU for lapic timer. * - 7 counters for each CPU for IPI counters for SMP. */ #ifdef SMP #define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + (1 + 7) * MAXCPU) #else #define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + 1) #endif #ifndef LOCORE typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss); #define IDTVEC(name) __CONCAT(X,name) struct intsrc; /* * Methods that a PIC provides to mask/unmask a given interrupt source, * "turn on" the interrupt on the CPU side by setting up an IDT entry, and * return the vector associated with this source. */ struct pic { void (*pic_enable_source)(struct intsrc *); void (*pic_disable_source)(struct intsrc *, int); void (*pic_eoi_source)(struct intsrc *); void (*pic_enable_intr)(struct intsrc *); void (*pic_disable_intr)(struct intsrc *); int (*pic_vector)(struct intsrc *); int (*pic_source_pending)(struct intsrc *); void (*pic_suspend)(struct pic *); void (*pic_resume)(struct pic *); int (*pic_config_intr)(struct intsrc *, enum intr_trigger, enum intr_polarity); void (*pic_assign_cpu)(struct intsrc *, u_int apic_id); STAILQ_ENTRY(pic) pics; }; /* Flags for pic_disable_source() */ enum { PIC_EOI, PIC_NO_EOI, }; /* * An interrupt source. The upper-layer code uses the PIC methods to * control a given source. The lower-layer PIC drivers can store additional * private data in a given interrupt source such as an interrupt pin number * or an I/O APIC pointer. */ struct intsrc { struct pic *is_pic; struct intr_event *is_event; u_long *is_count; u_long *is_straycount; u_int is_index; u_int is_handlers; }; struct trapframe; extern struct mtx icu_lock; extern int elcr_found; #ifndef DEV_ATPIC void atpic_reset(void); #endif /* XXX: The elcr_* prototypes probably belong somewhere else. 
*/ int elcr_probe(void); enum intr_trigger elcr_read_trigger(u_int irq); void elcr_resume(void); void elcr_write_trigger(u_int irq, enum intr_trigger trigger); #ifdef SMP void intr_add_cpu(u_int cpu); #endif int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep); +#ifdef SMP +int intr_bind(u_int vector, u_char cpu); +#endif int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol); void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame); struct intsrc *intr_lookup_source(int vector); int intr_register_pic(struct pic *pic); int intr_register_source(struct intsrc *isrc); int intr_remove_handler(void *cookie); void intr_resume(void); void intr_suspend(void); void intrcnt_add(const char *name, u_long **countp); void nexus_add_irq(u_long irq); int msi_alloc(device_t dev, int count, int maxcount, int *irqs); void msi_init(void); int msi_map(int irq, uint64_t *addr, uint32_t *data); int msi_release(int *irqs, int count); int msix_alloc(device_t dev, int *irq); int msix_release(int irq); #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* !__MACHINE_INTR_MACHDEP_H__ */ diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c index c9912206c5ec..fbaa96cce402 100644 --- a/sys/arm/arm/intr.c +++ b/sys/arm/arm/intr.c @@ -1,186 +1,186 @@ /* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard. * Copyright (c) 1994-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Soft interrupt and other generic interrupt functions. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include static struct intr_event *intr_events[NIRQ]; static int intrcnt_tab[NIRQ]; static int intrcnt_index = 0; static int last_printed = 0; void arm_handler_execute(struct trapframe *, int); #ifdef INTR_FILTER static void intr_disab_eoi_src(void *arg) { uintptr_t nb; nb = (uintptr_t)arg; arm_mask_irq(nb); } static void intr_eoi_src(void *arg) { uintptr_t nb; nb = (uintptr_t)arg; arm_unmask_irq(nb); } #endif void arm_setup_irqhandler(const char *name, driver_filter_t *filt, void (*hand)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; if (irq < 0 || irq >= NIRQ) return; event = intr_events[irq]; if (event == NULL) { #ifdef INTR_FILTER error = intr_event_create(&event, (void *)irq, 0, (void (*)(void *))arm_unmask_irq, intr_eoi_src, - intr_disab_eoi_src, "intr%d:", irq); + intr_disab_eoi_src, NULL, "intr%d:", irq); #else error = intr_event_create(&event, (void *)irq, 0, - (void (*)(void *))arm_unmask_irq, "intr%d:", irq); + (void (*)(void *))arm_unmask_irq, NULL, "intr%d:", irq); #endif if (error) return; intr_events[irq] = event; last_printed += snprintf(intrnames + last_printed, MAXCOMLEN + 1, "irq%d: %s", irq, name); last_printed++; intrcnt_tab[irq] = intrcnt_index; intrcnt_index++; } intr_event_add_handler(event, name, filt, hand, arg, intr_priority(flags), flags, cookiep); } int arm_remove_irqhandler(void *cookie) { return (intr_event_remove_handler(cookie)); } void dosoftints(void); void dosoftints(void) { } void arm_handler_execute(struct trapframe *frame, int irqnb) { struct intr_event *event; struct thread *td = curthread; #ifdef INTR_FILTER int i; #else int i, thread, ret; struct intr_handler *ih; #endif PCPU_INC(cnt.v_intr); td->td_intr_nesting_level++; while ((i = arm_get_next_irq()) != -1) { #ifndef INTR_FILTER arm_mask_irq(i); #endif intrcnt[intrcnt_tab[i]]++; event = intr_events[i]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { #ifdef INTR_FILTER arm_mask_irq(i); #endif continue; } #ifdef INTR_FILTER intr_event_handle(event, frame); /* XXX: Log stray IRQs */ #else /* Execute fast handlers. */ ret = 0; thread = 0; TAILQ_FOREACH(ih, &event->ie_handlers, ih_next) { if (ih->ih_filter == NULL) thread = 1; else ret = ih->ih_filter(ih->ih_argument ? ih->ih_argument : frame); /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } /* Schedule thread if needed. */ if (thread) intr_event_schedule_thread(event); else arm_unmask_irq(i); #endif } td->td_intr_nesting_level--; } diff --git a/sys/i386/i386/intr_machdep.c b/sys/i386/i386/intr_machdep.c index 993b68b43870..057ceaf8babb 100644 --- a/sys/i386/i386/intr_machdep.c +++ b/sys/i386/i386/intr_machdep.c @@ -1,579 +1,622 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for i386. For the i386, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. */ #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct sx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; #ifdef INTR_FILTER static void intr_eoi_src(void *arg); static void intr_disab_eoi_src(void *arg); static void intr_event_stray(void *cookie); #endif #ifdef SMP static int assign_cpu; static void intr_assign_next_cpu(struct intsrc *isrc); #endif +static int intr_assign_cpu(void *arg, u_char cpu); static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; STAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. */ int intr_register_pic(struct pic *pic) { int error; sx_xlock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { STAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } sx_xunlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. 
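 * The event created here is given intr_assign_cpu() as its assign_cpu
 * method, which lets a later intr_event_bind() request be pushed back
 * down to the source's PIC.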
*/ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); #ifdef INTR_FILTER error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, - intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector); + intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:", + vector); #else error = intr_event_create(&isrc->is_event, isrc, 0, - (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector); + (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:", + vector); #endif if (error) return (error); sx_xlock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { sx_xunlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; sx_xunlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { sx_xlock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { #ifdef SMP if (assign_cpu) intr_assign_next_cpu(isrc); #endif isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } sx_xunlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; int error; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { sx_xlock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); sx_xunlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } #ifdef INTR_FILTER void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; int vector; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; if (intr_event_handle(ie, frame) != 0) intr_event_stray(isrc); } static void intr_event_stray(void *cookie) { struct intsrc *isrc; isrc = cookie; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. 
*/ isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc)); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", isrc->is_pic->pic_vector(isrc)); } static void intr_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_eoi_source(isrc); } static void intr_disab_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } #else void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; struct intr_handler *ih; int error, vector, thread, ret; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); return; } /* * Execute fast interrupt handlers directly. * To support clock handlers, if a handler registers * with a NULL argument, then we pass it a pointer to * a trapframe as its argument. */ td->td_intr_nesting_level++; ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument == NULL ? frame : ih->ih_argument, ih->ih_name); if (ih->ih_argument == NULL) ret = ih->ih_filter(frame); else ret = ih->ih_filter(ih->ih_argument); /* * Wrapper handler special handling: * * in some particular cases (like pccard and pccbb), * the _real_ device handler is wrapped in a couple of * functions - a filter wrapper and an ithread wrapper. * In this case (and just in this case), the filter wrapper * could ask the system to schedule the ithread and mask * the interrupt source if the wrapped handler is composed * of just an ithread handler. * * TODO: write a generic wrapper to avoid people rolling * their own */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } /* * If there are any threaded handlers that need to run, * mask the source as well as sending it an EOI. Otherwise, * just send it an EOI but leave it unmasked. */ if (thread) isrc->is_pic->pic_disable_source(isrc, PIC_EOI); else isrc->is_pic->pic_eoi_source(isrc); /* Schedule the ithread if needed. 
*/ if (thread) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("bad stray interrupt")); } critical_exit(); td->td_intr_nesting_level--; } #endif void intr_resume(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic); } sx_xunlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } sx_xunlock(&intr_table_lock); } +static int +intr_assign_cpu(void *arg, u_char cpu) +{ +#ifdef SMP + struct intsrc *isrc; + + /* + * Don't do anything during early boot. We will pick up the + * assignment once the APs are started. + */ + if (assign_cpu && cpu != NOCPU) { + isrc = arg; + sx_xlock(&intr_table_lock); + isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); + sx_xunlock(&intr_table_lock); + } + return (0); +#else + return (EOPNOTSUPP); +#endif +} + static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; STAILQ_INIT(&pics); sx_init(&intr_table_lock, "intr sources"); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL) #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ /* The BSP is always a valid target. */ static cpumask_t intr_cpus = (1 << 0); static int current_cpu; static void intr_assign_next_cpu(struct intsrc *isrc) { - struct pic *pic; - u_int apic_id; /* * Assign this source to a local APIC in a round-robin fashion. */ - pic = isrc->is_pic; - apic_id = cpu_apic_ids[current_cpu]; - pic->pic_assign_cpu(isrc, apic_id); + isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]); do { current_cpu++; if (current_cpu > mp_maxid) current_cpu = 0; } while (!(intr_cpus & (1 << current_cpu))); } +/* Attempt to bind the specified IRQ to the specified CPU. 
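+ * A hypothetical caller (a driver or a sysctl handler, say) might issue
+ *
+ *	error = intr_bind(irq, 2);
+ *
+ * to steer an IRQ at CPU 2, and intr_bind(irq, NOCPU) to release the
+ * binding again.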
*/ +int +intr_bind(u_int vector, u_char cpu) +{ + struct intsrc *isrc; + + isrc = intr_lookup_source(vector); + if (isrc == NULL) + return (EINVAL); + return (intr_event_bind(isrc->is_event, cpu)); +} + /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); intr_cpus |= (1 << cpu); } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (mp_ncpus == 1) return; /* Round-robin assign a CPU to each enabled source. */ sx_xlock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; - if (isrc != NULL && isrc->is_handlers > 0) - intr_assign_next_cpu(isrc); + if (isrc != NULL && isrc->is_handlers > 0) { + /* + * If this event is already bound to a CPU, + * then assign the source to that CPU instead + * of picking one via round-robin. + */ + if (isrc->is_event->ie_cpu != NOCPU) + isrc->is_pic->pic_assign_cpu(isrc, + cpu_apic_ids[isrc->is_event->ie_cpu]); + else + intr_assign_next_cpu(isrc); + } } sx_xunlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL) #endif diff --git a/sys/i386/include/intr_machdep.h b/sys/i386/include/intr_machdep.h index 4e6d07013fb4..2aeb0c9932a8 100644 --- a/sys/i386/include/intr_machdep.h +++ b/sys/i386/include/intr_machdep.h @@ -1,156 +1,159 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __MACHINE_INTR_MACHDEP_H__ #define __MACHINE_INTR_MACHDEP_H__ #ifdef _KERNEL /* * The maximum number of I/O interrupts we allow. This number is rather * arbitrary as it is just the maximum IRQ resource value. The interrupt * source for a given IRQ maps that I/O interrupt to device interrupt * source whether it be a pin on an interrupt controller or an MSI interrupt. * The 16 ISA IRQs are assigned fixed IDT vectors, but all other device * interrupts allocate IDT vectors on demand. Currently we have 191 IDT * vectors available for device interrupts. 
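 * (With FIRST_MSI_INT at 256 and NUM_MSI_INTS at 128, NUM_IO_INTS works
 * out to 384 possible IRQ values in total.)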
On many systems with I/O APICs, * a lot of the IRQs are not used, so this number can be much larger than * 191 and still be safe since only interrupt sources in actual use will * allocate IDT vectors. * * The first 255 IRQs (0 - 254) are reserved for ISA IRQs and PCI intline IRQs. * IRQ values beyond 256 are used by MSI. We leave 255 unused to avoid * confusion since 255 is used in PCI to indicate an invalid IRQ. */ #define NUM_MSI_INTS 128 #define FIRST_MSI_INT 256 #define NUM_IO_INTS (FIRST_MSI_INT + NUM_MSI_INTS) /* * Default base address for MSI messages on x86 platforms. */ #define MSI_INTEL_ADDR_BASE 0xfee00000 /* * - 1 ??? dummy counter. * - 2 counters for each I/O interrupt. * - 1 counter for each CPU for lapic timer. * - 7 counters for each CPU for IPI counters for SMP. */ #ifdef SMP #define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + (1 + 7) * MAXCPU) #else #define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + 1) #endif #ifndef LOCORE typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss); #define IDTVEC(name) __CONCAT(X,name) struct intsrc; /* * Methods that a PIC provides to mask/unmask a given interrupt source, * "turn on" the interrupt on the CPU side by setting up an IDT entry, and * return the vector associated with this source. */ struct pic { void (*pic_enable_source)(struct intsrc *); void (*pic_disable_source)(struct intsrc *, int); void (*pic_eoi_source)(struct intsrc *); void (*pic_enable_intr)(struct intsrc *); void (*pic_disable_intr)(struct intsrc *); int (*pic_vector)(struct intsrc *); int (*pic_source_pending)(struct intsrc *); void (*pic_suspend)(struct pic *); void (*pic_resume)(struct pic *); int (*pic_config_intr)(struct intsrc *, enum intr_trigger, enum intr_polarity); void (*pic_assign_cpu)(struct intsrc *, u_int apic_id); STAILQ_ENTRY(pic) pics; }; /* Flags for pic_disable_source() */ enum { PIC_EOI, PIC_NO_EOI, }; /* * An interrupt source. The upper-layer code uses the PIC methods to * control a given source. The lower-layer PIC drivers can store additional * private data in a given interrupt source such as an interrupt pin number * or an I/O APIC pointer. */ struct intsrc { struct pic *is_pic; struct intr_event *is_event; u_long *is_count; u_long *is_straycount; u_int is_index; u_int is_handlers; }; struct trapframe; extern struct mtx icu_lock; extern int elcr_found; /* XXX: The elcr_* prototypes probably belong somewhere else. 
*/ int elcr_probe(void); enum intr_trigger elcr_read_trigger(u_int irq); void elcr_resume(void); void elcr_write_trigger(u_int irq, enum intr_trigger trigger); #ifdef SMP void intr_add_cpu(u_int cpu); #endif int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep); +#ifdef SMP +int intr_bind(u_int vector, u_char cpu); +#endif int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol); void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame); struct intsrc *intr_lookup_source(int vector); int intr_register_pic(struct pic *pic); int intr_register_source(struct intsrc *isrc); int intr_remove_handler(void *cookie); void intr_resume(void); void intr_suspend(void); void intrcnt_add(const char *name, u_long **countp); void nexus_add_irq(u_long irq); int msi_alloc(device_t dev, int count, int maxcount, int *irqs); void msi_init(void); int msi_map(int irq, uint64_t *addr, uint32_t *data); int msi_release(int* irqs, int count); int msix_alloc(device_t dev, int *irq); int msix_release(int irq); #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* !__MACHINE_INTR_MACHDEP_H__ */ diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c index 5d30566e8b98..d832dd81871b 100644 --- a/sys/ia64/ia64/interrupt.c +++ b/sys/ia64/ia64/interrupt.c @@ -1,487 +1,487 @@ /* $FreeBSD$ */ /* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */ /*- * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. * All rights reserved. * * Authors: Keith Bostic, Chris G. Demetriou * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /*- * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center. * Redistribute and modify at will, leaving only this additional copyright * notice. */ #include "opt_ddb.h" #include /* RCS ID & Copyright macro defns */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EVCNT_COUNTERS struct evcnt clock_intr_evcnt; /* event counter for clock intrs. 
*/ #else #include #include #endif #ifdef DDB #include #endif #ifdef SMP extern int mp_ipi_test; #endif static void ia64_dispatch_intr(void *, u_int); static void dummy_perf(unsigned long vector, struct trapframe *tf) { printf("performance interrupt!\n"); } void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf; static unsigned int ints[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, ints, CTLFLAG_RW, &ints, sizeof(ints), "IU", ""); static unsigned int clks[MAXCPU]; #ifdef SMP SYSCTL_OPAQUE(_debug, OID_AUTO, clks, CTLFLAG_RW, &clks, sizeof(clks), "IU", ""); #else SYSCTL_INT(_debug, OID_AUTO, clks, CTLFLAG_RW, clks, 0, ""); #endif #ifdef SMP static unsigned int asts[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, asts, CTLFLAG_RW, &asts, sizeof(asts), "IU", ""); static unsigned int rdvs[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, rdvs, CTLFLAG_RW, &rdvs, sizeof(rdvs), "IU", ""); #endif SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics"); static int adjust_edges = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_edges, CTLFLAG_RD, &adjust_edges, 0, "Number of times ITC got more than 12.5% behind"); static int adjust_excess = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_excess, CTLFLAG_RD, &adjust_excess, 0, "Total number of ignored ITC interrupts"); static int adjust_lost = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_lost, CTLFLAG_RD, &adjust_lost, 0, "Total number of lost ITC interrupts"); static int adjust_ticks = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks, 0, "Total number of ITC interrupts with adjustment"); void interrupt(struct trapframe *tf) { struct thread *td; volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK; uint64_t adj, clk, itc; int64_t delta; u_int vector; int count; uint8_t inta; ia64_set_fpsr(IA64_FPSR_DEFAULT); td = curthread; atomic_add_int(&td->td_intr_nesting_level, 1); vector = tf->tf_special.ifa; next: /* * Handle ExtINT interrupts by generating an INTA cycle to * read the vector. 
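 * An INTA value of 15 is treated as spurious: it is EOId and handled
 * via the stray path below.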
*/ if (vector == 0) { inta = ib->ib_inta; printf("ExtINT interrupt: vector=%u\n", (int)inta); if (inta == 15) { __asm __volatile("mov cr.eoi = r0;; srlz.d"); goto stray; } vector = (int)inta; } else if (vector == 15) goto stray; if (vector == CLOCK_VECTOR) {/* clock interrupt */ /* CTR0(KTR_INTR, "clock interrupt"); */ itc = ia64_get_itc(); PCPU_INC(cnt.v_intr); #ifdef EVCNT_COUNTERS clock_intr_evcnt.ev_count++; #else intrcnt[INTRCNT_CLOCK]++; #endif clks[PCPU_GET(cpuid)]++; critical_enter(); adj = PCPU_GET(clockadj); clk = PCPU_GET(clock); delta = itc - clk; count = 0; while (delta >= ia64_clock_reload) { /* Only the BSP runs the real clock */ if (PCPU_GET(cpuid) == 0) hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf)); else hardclock_cpu(TRAPF_USERMODE(tf)); if (profprocs != 0) profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf)); statclock(TRAPF_USERMODE(tf)); delta -= ia64_clock_reload; clk += ia64_clock_reload; if (adj != 0) adjust_ticks++; count++; } ia64_set_itm(itc + ia64_clock_reload - adj); if (count > 0) { adjust_lost += count - 1; if (delta > (ia64_clock_reload >> 3)) { if (adj == 0) adjust_edges++; adj = ia64_clock_reload >> 4; } else adj = 0; } else { adj = 0; adjust_excess++; } PCPU_SET(clock, clk); PCPU_SET(clockadj, adj); critical_exit(); ia64_srlz_d(); #ifdef SMP } else if (vector == ipi_vector[IPI_AST]) { asts[PCPU_GET(cpuid)]++; CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid)); } else if (vector == ipi_vector[IPI_HIGH_FP]) { struct thread *thr = PCPU_GET(fpcurthread); if (thr != NULL) { mtx_lock_spin(&thr->td_md.md_highfp_mtx); save_high_fp(&thr->td_pcb->pcb_high_fp); thr->td_pcb->pcb_fpcpu = NULL; PCPU_SET(fpcurthread, NULL); mtx_unlock_spin(&thr->td_md.md_highfp_mtx); } } else if (vector == ipi_vector[IPI_RENDEZVOUS]) { rdvs[PCPU_GET(cpuid)]++; CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid)); smp_rendezvous_action(); } else if (vector == ipi_vector[IPI_STOP]) { cpumask_t mybit = PCPU_GET(cpumask); savectx(PCPU_PTR(pcb)); atomic_set_int(&stopped_cpus, mybit); while ((started_cpus & mybit) == 0) cpu_spinwait(); atomic_clear_int(&started_cpus, mybit); atomic_clear_int(&stopped_cpus, mybit); } else if (vector == ipi_vector[IPI_TEST]) { CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid)); mp_ipi_test++; #endif } else { ints[PCPU_GET(cpuid)]++; ia64_dispatch_intr(tf, vector); } __asm __volatile("mov cr.eoi = r0;; srlz.d"); vector = ia64_get_ivr(); if (vector != 15) goto next; stray: atomic_subtract_int(&td->td_intr_nesting_level, 1); if (TRAPF_USERMODE(tf)) { enable_intr(); userret(td, tf); mtx_assert(&Giant, MA_NOTOWNED); do_ast(tf); } } /* * Hardware irqs have vectors starting at this offset. 
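 * With the base at 0x20, IRQ 10 maps to interrupt vector 0x2a, for
 * example.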
*/ #define IA64_HARDWARE_IRQ_BASE 0x20 struct ia64_intr { struct intr_event *event; /* interrupt event */ volatile long *cntp; /* interrupt counter */ struct sapic *sapic; u_int irq; }; static struct ia64_intr *ia64_intrs[256]; static void ia64_intr_eoi(void *arg) { u_int vector = (uintptr_t)arg; struct ia64_intr *i; i = ia64_intrs[vector]; if (i != NULL) sapic_eoi(i->sapic, vector); } static void ia64_intr_mask(void *arg) { u_int vector = (uintptr_t)arg; struct ia64_intr *i; i = ia64_intrs[vector]; if (i != NULL) { sapic_mask(i->sapic, i->irq); sapic_eoi(i->sapic, vector); } } static void ia64_intr_unmask(void *arg) { u_int vector = (uintptr_t)arg; struct ia64_intr *i; i = ia64_intrs[vector]; if (i != NULL) sapic_unmask(i->sapic, i->irq); } int ia64_setup_intr(const char *name, int irq, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct ia64_intr *i; struct sapic *sa; char *intrname; u_int vector; int error; /* Get the I/O SAPIC that corresponds to the IRQ. */ sa = sapic_lookup(irq); if (sa == NULL) return (EINVAL); /* * XXX - There's a priority implied by the choice of vector. * We should therefore relate the vector to the interrupt type. */ vector = irq + IA64_HARDWARE_IRQ_BASE; i = ia64_intrs[vector]; if (i == NULL) { i = malloc(sizeof(struct ia64_intr), M_DEVBUF, M_NOWAIT); if (i == NULL) return (ENOMEM); error = intr_event_create(&i->event, (void *)(uintptr_t)vector, 0, ia64_intr_unmask, #ifdef INTR_FILTER ia64_intr_eoi, ia64_intr_mask, #endif - "irq%u:", irq); + NULL, "irq%u:", irq); if (error) { free(i, M_DEVBUF); return (error); } if (!atomic_cmpset_ptr(&ia64_intrs[vector], NULL, i)) { intr_event_destroy(i->event); free(i, M_DEVBUF); i = ia64_intrs[vector]; } else { i->sapic = sa; i->irq = irq; i->cntp = intrcnt + irq + INTRCNT_ISA_IRQ; if (name != NULL && *name != '\0') { /* XXX needs abstraction. Too error prone. */ intrname = intrnames + (irq + INTRCNT_ISA_IRQ) * INTRNAME_LEN; memset(intrname, ' ', INTRNAME_LEN - 1); bcopy(name, intrname, strlen(name)); } sapic_enable(i->sapic, irq, vector); } } error = intr_event_add_handler(i->event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); return (error); } int ia64_teardown_intr(void *cookie) { return (intr_event_remove_handler(cookie)); } static void ia64_dispatch_intr(void *frame, u_int vector) { struct ia64_intr *i; struct intr_event *ie; /* our interrupt event */ #ifndef INTR_FILTER struct intr_handler *ih; int error, thread, ret; #endif /* * Find the interrupt thread for this vector. */ i = ia64_intrs[vector]; KASSERT(i != NULL, ("%s: unassigned vector", __func__)); (*i->cntp)++; ie = i->event; KASSERT(ie != NULL, ("%s: interrupt without event", __func__)); #ifdef INTR_FILTER if (intr_event_handle(ie, frame) != 0) { ia64_intr_mask((void *)(uintptr_t)vector); log(LOG_ERR, "stray irq%u\n", i->irq); } #else /* * As an optimization, if an event has no handlers, don't * schedule it to run. */ if (TAILQ_EMPTY(&ie->ie_handlers)) return; /* * Execute all fast interrupt handlers directly without Giant. Note * that this means that any fast interrupt handler must be MP safe. 
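 * They also run inside a critical section, so they must not sleep or
 * acquire sleepable locks.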
*/ ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument, ih->ih_name); ret = ih->ih_filter(ih->ih_argument); /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } if (thread) { ia64_intr_mask((void *)(uintptr_t)vector); error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("%s: impossible stray", __func__)); } else ia64_intr_eoi((void *)(uintptr_t)vector); critical_exit(); #endif } #ifdef DDB static void db_print_vector(u_int vector, int always) { struct ia64_intr *i; i = ia64_intrs[vector]; if (i != NULL) { db_printf("vector %u (%p): ", vector, i); sapic_print(i->sapic, i->irq); } else if (always) db_printf("vector %u: unassigned\n", vector); } DB_SHOW_COMMAND(vector, db_show_vector) { u_int vector; if (have_addr) { vector = ((addr >> 4) % 16) * 10 + (addr % 16); if (vector >= 256) db_printf("error: vector %u not in range [0..255]\n", vector); else db_print_vector(vector, 1); } else { for (vector = 0; vector < 256; vector++) db_print_vector(vector, 0); } } #endif diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index d9b983a1bab5..3d02073fcb21 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -1,1564 +1,1652 @@ /*- * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #ifdef DDB #include #include #endif /* * Describe an interrupt thread. There is one of these per interrupt event. */ struct intr_thread { struct intr_event *it_event; struct thread *it_thread; /* Kernel thread. */ int it_flags; /* (j) IT_* flags. */ int it_need; /* Needs service. */ }; /* Interrupt thread flags kept in it_flags */ #define IT_DEAD 0x000001 /* Thread is waiting to exit. 
*/ struct intr_entropy { struct thread *td; uintptr_t event; }; struct intr_event *clk_intr_event; struct intr_event *tty_intr_event; void *softclock_ih; void *vm_ih; struct proc *intrproc; static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); static int intr_storm_threshold = 1000; TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, &intr_storm_threshold, 0, "Number of consecutive interrupts before storm protection is enabled"); static TAILQ_HEAD(, intr_event) event_list = TAILQ_HEAD_INITIALIZER(event_list); static void intr_event_update(struct intr_event *ie); #ifdef INTR_FILTER static struct intr_thread *ithread_create(const char *name, struct intr_handler *ih); #else static struct intr_thread *ithread_create(const char *name); #endif static void ithread_destroy(struct intr_thread *ithread); static void ithread_execute_handlers(struct proc *p, struct intr_event *ie); #ifdef INTR_FILTER static void priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih); #endif static void ithread_loop(void *); static void ithread_update(struct intr_thread *ithd); static void start_softintr(void *); /* Map an interrupt type to an ithread priority. */ u_char intr_priority(enum intr_type flags) { u_char pri; flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); switch (flags) { case INTR_TYPE_TTY: pri = PI_TTYLOW; break; case INTR_TYPE_BIO: /* * XXX We need to refine this. BSD/OS distinguishes * between tape and disk priorities. */ pri = PI_DISK; break; case INTR_TYPE_NET: pri = PI_NET; break; case INTR_TYPE_CAM: pri = PI_DISK; /* XXX or PI_CAM? */ break; case INTR_TYPE_AV: /* Audio/video */ pri = PI_AV; break; case INTR_TYPE_CLK: pri = PI_REALTIME; break; case INTR_TYPE_MISC: pri = PI_DULL; /* don't care */ break; default: /* We didn't specify an interrupt level. */ panic("intr_priority: no interrupt type in flags"); } return pri; } /* * Update an ithread based on the associated intr_event. */ static void ithread_update(struct intr_thread *ithd) { struct intr_event *ie; struct thread *td; u_char pri; ie = ithd->it_event; td = ithd->it_thread; /* Determine the overall priority of this event. */ if (TAILQ_EMPTY(&ie->ie_handlers)) pri = PRI_MAX_ITHD; else pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; /* Update name and priority. */ strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); thread_lock(td); sched_prio(td, pri); thread_unlock(td); } /* * Regenerate the full name of an interrupt event and update its priority. */ static void intr_event_update(struct intr_event *ie) { struct intr_handler *ih; char *last; int missed, space; /* Start off with no entropy and just the name of the event. */ mtx_assert(&ie->ie_lock, MA_OWNED); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); ie->ie_flags &= ~IE_ENTROPY; missed = 0; space = 1; /* Run through all the handlers updating values. */ TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < sizeof(ie->ie_fullname)) { strcat(ie->ie_fullname, " "); strcat(ie->ie_fullname, ih->ih_name); space = 0; } else missed++; if (ih->ih_flags & IH_ENTROPY) ie->ie_flags |= IE_ENTROPY; } /* * If the handler names were too long, add +'s to indicate missing * names. If we run out of room and still have +'s to add, change * the last character from a + to a *. 
*/ last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; while (missed-- > 0) { if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { if (*last == '+') { *last = '*'; break; } else *last = '+'; } else if (space) { strcat(ie->ie_fullname, " +"); space = 0; } else strcat(ie->ie_fullname, "+"); } /* * If this event has an ithread, update its priority and * name. */ if (ie->ie_thread != NULL) ithread_update(ie->ie_thread); CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); } #ifndef INTR_FILTER int intr_event_create(struct intr_event **event, void *source, int flags, - void (*enable)(void *), const char *fmt, ...) + void (*enable)(void *), int (*assign_cpu)(void *, u_char), const char *fmt, + ...) { struct intr_event *ie; va_list ap; /* The only valid flag during creation is IE_SOFT. */ if ((flags & ~IE_SOFT) != 0) return (EINVAL); ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); ie->ie_source = source; ie->ie_enable = enable; + ie->ie_assign_cpu = assign_cpu; ie->ie_flags = flags; + ie->ie_cpu = NOCPU; TAILQ_INIT(&ie->ie_handlers); mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); va_start(ap, fmt); vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); va_end(ap); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_INSERT_TAIL(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); if (event != NULL) *event = ie; CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); return (0); } #else int intr_event_create(struct intr_event **event, void *source, int flags, void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *), - const char *fmt, ...) + int (*assign_cpu)(void *, u_char), const char *fmt, ...) { struct intr_event *ie; va_list ap; /* The only valid flag during creation is IE_SOFT. */ if ((flags & ~IE_SOFT) != 0) return (EINVAL); ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); ie->ie_source = source; ie->ie_enable = enable; + ie->ie_assign_cpu = assign_cpu; ie->ie_eoi = eoi; ie->ie_disab = disab; ie->ie_flags = flags; + ie->ie_cpu = NOCPU; TAILQ_INIT(&ie->ie_handlers); mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); va_start(ap, fmt); vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); va_end(ap); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_INSERT_TAIL(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); if (event != NULL) *event = ie; CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); return (0); } #endif +/* + * Bind an interrupt event to the specified CPU. Note that not all + * platforms support binding an interrupt to a CPU. For those + * platforms this request will fail. For supported platforms, any + * associated ithreads as well as the primary interrupt context will + * be bound to the specified CPU. Using a cpu id of NOCPU unbinds + * the interrupt event. + */ +int +intr_event_bind(struct intr_event *ie, u_char cpu) +{ + struct thread *td; + int error; + + /* Need a CPU to bind to. */ + if (cpu != NOCPU && CPU_ABSENT(cpu)) + return (EINVAL); + + if (ie->ie_assign_cpu == NULL) + return (EOPNOTSUPP); + + /* Don't allow a bind request if the interrupt is already bound.
*/ + mtx_lock(&ie->ie_lock); + if (ie->ie_cpu != NOCPU && cpu != NOCPU) { + mtx_unlock(&ie->ie_lock); + return (EBUSY); + } + mtx_unlock(&ie->ie_lock); + + error = ie->ie_assign_cpu(ie->ie_source, cpu); + if (error) + return (error); + mtx_lock(&ie->ie_lock); + if (ie->ie_thread != NULL) + td = ie->ie_thread->it_thread; + else + td = NULL; + if (td != NULL) + thread_lock(td); + ie->ie_cpu = cpu; + if (td != NULL) + thread_unlock(td); + mtx_unlock(&ie->ie_lock); + return (0); +} + int intr_event_destroy(struct intr_event *ie) { mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { mtx_unlock(&ie->ie_lock); return (EBUSY); } mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_REMOVE(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); #ifndef notyet if (ie->ie_thread != NULL) { ithread_destroy(ie->ie_thread); ie->ie_thread = NULL; } #endif mtx_unlock(&ie->ie_lock); mtx_destroy(&ie->ie_lock); free(ie, M_ITHREAD); return (0); } #ifndef INTR_FILTER static struct intr_thread * ithread_create(const char *name) { struct intr_thread *ithd; struct thread *td; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); error = kproc_kthread_add(ithread_loop, ithd, &intrproc, &td, RFSTOPPED | RFHIGHPID, 0, "intr", "%s", name); if (error) panic("kproc_create() failed with %d", error); thread_lock(td); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); thread_unlock(td); td->td_pflags |= TDP_ITHREAD; ithd->it_thread = td; CTR2(KTR_INTR, "%s: created %s", __func__, name); return (ithd); } #else static struct intr_thread * ithread_create(const char *name, struct intr_handler *ih) { struct intr_thread *ithd; struct thread *td; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); error = kproc_kthread_add(ithread_loop, ih, &intrproc, &td, RFSTOPPED | RFHIGHPID, 0, "intr", "%s", name); if (error) panic("kproc_create() failed with %d", error); thread_lock(td); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); thread_unlock(td); td->td_pflags |= TDP_ITHREAD; ithd->it_thread = td; CTR2(KTR_INTR, "%s: created %s", __func__, name); return (ithd); } #endif static void ithread_destroy(struct intr_thread *ithread) { struct thread *td; CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); td = ithread->it_thread; thread_lock(td); ithread->it_flags |= IT_DEAD; if (TD_AWAITING_INTR(td)) { TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } thread_unlock(td); } #ifndef INTR_FILTER int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep) { struct intr_handler *ih, *temp_ih; struct intr_thread *it; if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) return (EINVAL); /* Allocate and populate an interrupt handler structure. */ ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); ih->ih_filter = filter; ih->ih_handler = handler; ih->ih_argument = arg; ih->ih_name = name; ih->ih_event = ie; ih->ih_pri = pri; if (flags & INTR_EXCL) ih->ih_flags = IH_EXCLUSIVE; if (flags & INTR_MPSAFE) ih->ih_flags |= IH_MPSAFE; if (flags & INTR_ENTROPY) ih->ih_flags |= IH_ENTROPY; /* We can only have one exclusive handler in a event. */ mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { if ((flags & INTR_EXCL) || (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { mtx_unlock(&ie->ie_lock); free(ih, M_ITHREAD); return (EINVAL); } } /* Add the new handler to the event in priority order. 
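Note that intr_event_bind() above never touches hardware itself: it validates the CPU, defers to the MD ie_assign_cpu hook, and only then records ie_cpu so the ithread loop later in this file can rebind its thread. A hedged sketch of how a platform hook and a consumer fit together; the mypic_* names are hypothetical, while intr_event_create(), intr_event_bind() and NOCPU are the interfaces this change introduces (the non-INTR_FILTER create signature is shown):

#include <sys/param.h>
#include <sys/interrupt.h>

static void mypic_enable(void *arg);			/* hypothetical PIC op */
static int mypic_route_irq(void *arg, u_char cpu);	/* hypothetical */

/*
 * MD hook: reprogram the interrupt routing or reject the request.
 * A cpu of NOCPU asks the PIC to undo any previous binding.
 */
static int
mypic_assign_cpu(void *arg, u_char cpu)
{

	return (mypic_route_irq(arg, cpu));
}

static int
mypic_attach_irq(void *source, int irq, struct intr_event **iep)
{
	int error;

	/*
	 * Platforms that cannot retarget interrupts pass NULL for the
	 * hook, and intr_event_bind() then fails with EOPNOTSUPP.
	 */
	error = intr_event_create(iep, source, 0, mypic_enable,
	    mypic_assign_cpu, "irq%d:", irq);
	if (error)
		return (error);
	/* Pin this source to CPU 1; intr_event_bind(ie, NOCPU) unbinds. */
	return (intr_event_bind(*iep, 1));
}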
*/ TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { if (temp_ih->ih_pri > ih->ih_pri) break; } if (temp_ih == NULL) TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); else TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); intr_event_update(ie); /* Create a thread if we need one. */ while (ie->ie_thread == NULL && handler != NULL) { if (ie->ie_flags & IE_ADDING_THREAD) msleep(ie, &ie->ie_lock, 0, "ithread", 0); else { ie->ie_flags |= IE_ADDING_THREAD; mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn"); mtx_lock(&ie->ie_lock); ie->ie_flags &= ~IE_ADDING_THREAD; ie->ie_thread = it; it->it_event = ie; ithread_update(it); wakeup(ie); } } CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, ie->ie_name); mtx_unlock(&ie->ie_lock); if (cookiep != NULL) *cookiep = ih; return (0); } #else int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep) { struct intr_handler *ih, *temp_ih; struct intr_thread *it; if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) return (EINVAL); /* Allocate and populate an interrupt handler structure. */ ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); ih->ih_filter = filter; ih->ih_handler = handler; ih->ih_argument = arg; ih->ih_name = name; ih->ih_event = ie; ih->ih_pri = pri; if (flags & INTR_EXCL) ih->ih_flags = IH_EXCLUSIVE; if (flags & INTR_MPSAFE) ih->ih_flags |= IH_MPSAFE; if (flags & INTR_ENTROPY) ih->ih_flags |= IH_ENTROPY; /* We can only have one exclusive handler in a event. */ mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { if ((flags & INTR_EXCL) || (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { mtx_unlock(&ie->ie_lock); free(ih, M_ITHREAD); return (EINVAL); } } /* Add the new handler to the event in priority order. */ TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { if (temp_ih->ih_pri > ih->ih_pri) break; } if (temp_ih == NULL) TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); else TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); intr_event_update(ie); /* For filtered handlers, create a private ithread to run on. */ if (filter != NULL && handler != NULL) { mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn", ih); mtx_lock(&ie->ie_lock); it->it_event = ie; ih->ih_thread = it; ithread_update(it); // XXX - do we really need this?!?!? } else { /* Create the global per-event thread if we need one. */ while (ie->ie_thread == NULL && handler != NULL) { if (ie->ie_flags & IE_ADDING_THREAD) msleep(ie, &ie->ie_lock, 0, "ithread", 0); else { ie->ie_flags |= IE_ADDING_THREAD; mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn", ih); mtx_lock(&ie->ie_lock); ie->ie_flags &= ~IE_ADDING_THREAD; ie->ie_thread = it; it->it_event = ie; ithread_update(it); wakeup(ie); } } } CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, ie->ie_name); mtx_unlock(&ie->ie_lock); if (cookiep != NULL) *cookiep = ih; return (0); } #endif /* * Return the ie_source field from the intr_event an intr_handler is * associated with. 
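Both intr_event_add_handler() variants above enforce the same invariants: at most one exclusive handler per event, handlers kept sorted by priority, and an ithread created lazily the first time a threaded handler shows up. For orientation, a consumer-side registration might look like the following sketch, where every foo_* name is invented:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

static int foo_intr_pending(void *arg);	/* hypothetical status check */
static void foo_ack(void *arg);		/* hypothetical ack */
static void foo_work(void *arg);	/* hypothetical deferred work */

static int
foo_filter(void *arg)
{

	/* Interrupt context: no sleeping, keep this short. */
	if (!foo_intr_pending(arg))
		return (FILTER_STRAY);
	foo_ack(arg);
	return (FILTER_SCHEDULE_THREAD);	/* defer the real work */
}

static void
foo_handler(void *arg)
{

	/* Ithread context; INTR_MPSAFE means Giant is not acquired. */
	foo_work(arg);
}

static int
foo_register(struct intr_event *ie, void *sc, void **cookiep)
{

	return (intr_event_add_handler(ie, "foo0", foo_filter, foo_handler,
	    sc, intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
	    cookiep));
}

Teardown is symmetric: the cookie returned through cookiep is later passed to intr_event_remove_handler(), which is exactly what the MD *_teardown_intr() wrappers in this diff do.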
*/ void * intr_handler_source(void *cookie) { struct intr_handler *ih; struct intr_event *ie; ih = (struct intr_handler *)cookie; if (ih == NULL) return (NULL); ie = ih->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", ih->ih_name)); return (ie->ie_source); } #ifndef INTR_FILTER int intr_event_remove_handler(void *cookie) { struct intr_handler *handler = (struct intr_handler *)cookie; struct intr_event *ie; #ifdef INVARIANTS struct intr_handler *ih; #endif #ifdef notyet int dead; #endif if (handler == NULL) return (EINVAL); ie = handler->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", handler->ih_name)); mtx_lock(&ie->ie_lock); CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, ie->ie_name); #ifdef INVARIANTS TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) if (ih == handler) goto ok; mtx_unlock(&ie->ie_lock); panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", ih->ih_name, ie->ie_name); ok: #endif /* * If there is no ithread, then just remove the handler and return. * XXX: Note that an INTR_FAST handler might be running on another * CPU! */ if (ie->ie_thread == NULL) { TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } /* * If the interrupt thread is already running, then just mark this * handler as being dead and let the ithread do the actual removal. * * During a cold boot while cold is set, msleep() does not sleep, * so we have to remove the handler here rather than letting the * thread do it. */ thread_lock(ie->ie_thread->it_thread); if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { handler->ih_flags |= IH_DEAD; /* * Ensure that the thread will process the handler list * again and remove this handler if it has already passed * it on the list. */ ie->ie_thread->it_need = 1; } else TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); thread_unlock(ie->ie_thread->it_thread); while (handler->ih_flags & IH_DEAD) msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); intr_event_update(ie); #ifdef notyet /* * XXX: This could be bad in the case of ppbus(8). Also, I think * this could lead to races of stale data when servicing an * interrupt. */ dead = 1; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (!(ih->ih_flags & IH_FAST)) { dead = 0; break; } } if (dead) { ithread_destroy(ie->ie_thread); ie->ie_thread = NULL; } #endif mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } int intr_event_schedule_thread(struct intr_event *ie) { struct intr_entropy entropy; struct intr_thread *it; struct thread *td; struct thread *ctd; struct proc *p; /* * If no ithread or no handlers, then we have a stray interrupt. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || ie->ie_thread == NULL) return (EINVAL); ctd = curthread; it = ie->ie_thread; td = it->it_thread; p = td->td_proc; /* * If any of the handlers for this ithread claim to be good * sources of entropy, then gather some. */ if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, p->p_pid, td->td_name); entropy.event = (uintptr_t)ie; entropy.td = ctd; random_harvest(&entropy, sizeof(entropy), 2, 0, RANDOM_INTERRUPT); } KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); /* * Set it_need to tell the thread to keep running if it is already * running. Then, lock the thread and see if we actually need to * put it on the runqueue. 
*/ it->it_need = 1; thread_lock(td); if (TD_AWAITING_INTR(td)) { CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, td->td_name); TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, td->td_name, it->it_need, td->td_state); } thread_unlock(td); return (0); } #else int intr_event_remove_handler(void *cookie) { struct intr_handler *handler = (struct intr_handler *)cookie; struct intr_event *ie; struct intr_thread *it; #ifdef INVARIANTS struct intr_handler *ih; #endif #ifdef notyet int dead; #endif if (handler == NULL) return (EINVAL); ie = handler->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", handler->ih_name)); mtx_lock(&ie->ie_lock); CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, ie->ie_name); #ifdef INVARIANTS TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) if (ih == handler) goto ok; mtx_unlock(&ie->ie_lock); panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", ih->ih_name, ie->ie_name); ok: #endif /* * If there are no ithreads (per event and per handler), then * just remove the handler and return. * XXX: Note that an INTR_FAST handler might be running on another CPU! */ if (ie->ie_thread == NULL && handler->ih_thread == NULL) { TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } /* Private or global ithread? */ it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; /* * If the interrupt thread is already running, then just mark this * handler as being dead and let the ithread do the actual removal. * * During a cold boot while cold is set, msleep() does not sleep, * so we have to remove the handler here rather than letting the * thread do it. */ thread_lock(it->it_thread); if (!TD_AWAITING_INTR(it->it_thread) && !cold) { handler->ih_flags |= IH_DEAD; /* * Ensure that the thread will process the handler list * again and remove this handler if it has already passed * it on the list. */ it->it_need = 1; } else TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); thread_unlock(it->it_thread); while (handler->ih_flags & IH_DEAD) msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); /* * At this point, the handler has been disconnected from the event, * so we can kill the private ithread if any. */ if (handler->ih_thread) { ithread_destroy(handler->ih_thread); handler->ih_thread = NULL; } intr_event_update(ie); #ifdef notyet /* * XXX: This could be bad in the case of ppbus(8). Also, I think * this could lead to races of stale data when servicing an * interrupt. */ dead = 1; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (handler != NULL) { dead = 0; break; } } if (dead) { ithread_destroy(ie->ie_thread); ie->ie_thread = NULL; } #endif mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } int intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) { struct intr_entropy entropy; struct thread *td; struct thread *ctd; struct proc *p; /* * If no ithread or no handlers, then we have a stray interrupt. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) return (EINVAL); ctd = curthread; td = it->it_thread; p = td->td_proc; /* * If any of the handlers for this ithread claim to be good * sources of entropy, then gather some. 
*/ if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, p->p_pid, td->td_name); entropy.event = (uintptr_t)ie; entropy.td = ctd; random_harvest(&entropy, sizeof(entropy), 2, 0, RANDOM_INTERRUPT); } KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); /* * Set it_need to tell the thread to keep running if it is already * running. Then, lock the thread and see if we actually need to * put it on the runqueue. */ it->it_need = 1; thread_lock(td); if (TD_AWAITING_INTR(td)) { CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, td->td_name); TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, td->td_name, it->it_need, td->td_state); } thread_unlock(td); return (0); } #endif /* * Add a software interrupt handler to a specified event. If a given event * is not specified, then a new event is created. */ int swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, void *arg, int pri, enum intr_type flags, void **cookiep) { struct intr_event *ie; int error; if (flags & INTR_ENTROPY) return (EINVAL); ie = (eventp != NULL) ? *eventp : NULL; if (ie != NULL) { if (!(ie->ie_flags & IE_SOFT)) return (EINVAL); } else { #ifdef INTR_FILTER error = intr_event_create(&ie, NULL, IE_SOFT, - NULL, NULL, NULL, "swi%d:", pri); + NULL, NULL, NULL, NULL, "swi%d:", pri); #else error = intr_event_create(&ie, NULL, IE_SOFT, - NULL, "swi%d:", pri); + NULL, NULL, "swi%d:", pri); #endif if (error) return (error); if (eventp != NULL) *eventp = ie; } return (intr_event_add_handler(ie, name, NULL, handler, arg, (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); } /* * Schedule a software interrupt thread. */ void swi_sched(void *cookie, int flags) { struct intr_handler *ih = (struct intr_handler *)cookie; struct intr_event *ie = ih->ih_event; int error; CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, ih->ih_need); /* * Set ih_need for this handler so that if the ithread is already * running it will execute this handler on the next pass. Otherwise, * it will execute it the next time it runs. */ atomic_store_rel_int(&ih->ih_need, 1); if (!(flags & SWI_DELAY)) { PCPU_INC(cnt.v_soft); #ifdef INTR_FILTER error = intr_event_schedule_thread(ie, ie->ie_thread); #else error = intr_event_schedule_thread(ie); #endif KASSERT(error == 0, ("stray software interrupt")); } } /* * Remove a software interrupt handler. Currently this code does not * remove the associated interrupt event if it becomes empty. Calling code * may do so manually via intr_event_destroy(), but that's not really * an optimal interface. */ int swi_remove(void *cookie) { return (intr_event_remove_handler(cookie)); } #ifdef INTR_FILTER static void priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) { struct intr_event *ie; ie = ih->ih_event; /* * If this handler is marked for death, remove it from * the list of handlers and wake up the sleeper. */ if (ih->ih_flags & IH_DEAD) { mtx_lock(&ie->ie_lock); TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); ih->ih_flags &= ~IH_DEAD; wakeup(ih); mtx_unlock(&ie->ie_lock); return; } /* Execute this handler. 
*/ CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, ih->ih_name, ih->ih_flags); if (!(ih->ih_flags & IH_MPSAFE)) mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if (!(ih->ih_flags & IH_MPSAFE)) mtx_unlock(&Giant); } #endif static void ithread_execute_handlers(struct proc *p, struct intr_event *ie) { struct intr_handler *ih, *ihn; /* Interrupt handlers should not sleep. */ if (!(ie->ie_flags & IE_SOFT)) THREAD_NO_SLEEPING(); TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { /* * If this handler is marked for death, remove it from * the list of handlers and wake up the sleeper. */ if (ih->ih_flags & IH_DEAD) { mtx_lock(&ie->ie_lock); TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); ih->ih_flags &= ~IH_DEAD; wakeup(ih); mtx_unlock(&ie->ie_lock); continue; } /* Skip filter only handlers */ if (ih->ih_handler == NULL) continue; /* * For software interrupt threads, we only execute * handlers that have their need flag set. Hardware * interrupt threads always invoke all of their handlers. */ if (ie->ie_flags & IE_SOFT) { if (!ih->ih_need) continue; else atomic_store_rel_int(&ih->ih_need, 0); } /* Execute this handler. */ CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, ih->ih_name, ih->ih_flags); if (!(ih->ih_flags & IH_MPSAFE)) mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if (!(ih->ih_flags & IH_MPSAFE)) mtx_unlock(&Giant); } if (!(ie->ie_flags & IE_SOFT)) THREAD_SLEEPING_OK(); /* * Interrupt storm handling: * * If this interrupt source is currently storming, then throttle * it to only fire the handler once per clock tick. * * If this interrupt source is not currently storming, but the * number of back to back interrupts exceeds the storm threshold, * then enter storming mode. */ if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && !(ie->ie_flags & IE_SOFT)) { /* Report the message only once every second. */ if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { printf( "interrupt storm detected on \"%s\"; throttling interrupt source\n", ie->ie_name); } pause("istorm", 1); } else ie->ie_count++; /* * Now that all the handlers have had a chance to run, reenable * the interrupt source. */ if (ie->ie_enable != NULL) ie->ie_enable(ie->ie_source); } #ifndef INTR_FILTER /* * This is the main code for interrupt threads. */ static void ithread_loop(void *arg) { struct intr_thread *ithd; struct intr_event *ie; struct thread *td; struct proc *p; + u_char cpu; td = curthread; p = td->td_proc; ithd = (struct intr_thread *)arg; KASSERT(ithd->it_thread == td, ("%s: ithread and proc linkage out of sync", __func__)); ie = ithd->it_event; ie->ie_count = 0; + cpu = NOCPU; /* * As long as we have interrupts outstanding, go through the * list of handlers, giving each one a go at it. */ for (;;) { /* * If we are an orphaned thread, then just die. */ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, p->p_pid, td->td_name); free(ithd, M_ITHREAD); kthread_exit(); } /* * Service interrupts. If another interrupt arrives while * we are running, it will set it_need to note that we * should make another pass. */ while (ithd->it_need) { /* * This might need a full read and write barrier * to make sure that this write posts before any * of the memory or device accesses in the * handlers. 
atomic_store_rel_int(&ithd->it_need, 0); ithread_execute_handlers(p, ie); } WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); mtx_assert(&Giant, MA_NOTOWNED); /* * Processed all our interrupts. Now get the sched * lock. This may take a while and it_need may get * set again, so we have to check it again. */ thread_lock(td); if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { TD_SET_IWAIT(td); ie->ie_count = 0; mi_switch(SW_VOL, NULL); } + +#ifdef SMP + /* + * Ensure we are bound to the correct CPU. We can't + * move ithreads until SMP is running, however, so just + * leave interrupts on the boot CPU during boot. + */ + if (ie->ie_cpu != cpu && smp_started) { + cpu = ie->ie_cpu; + if (cpu == NOCPU) + sched_unbind(td); + else + sched_bind(td, cpu); + } +#endif thread_unlock(td); } } #else /* * This is the main code for interrupt threads. */ static void ithread_loop(void *arg) { struct intr_thread *ithd; struct intr_handler *ih; struct intr_event *ie; struct thread *td; struct proc *p; int priv; + u_char cpu; td = curthread; p = td->td_proc; ih = (struct intr_handler *)arg; priv = (ih->ih_thread != NULL) ? 1 : 0; ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; KASSERT(ithd->it_thread == td, ("%s: ithread and proc linkage out of sync", __func__)); ie = ithd->it_event; ie->ie_count = 0; + cpu = NOCPU; /* * As long as we have interrupts outstanding, go through the * list of handlers, giving each one a go at it. */ for (;;) { /* * If we are an orphaned thread, then just die. */ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, p->p_pid, td->td_name); free(ithd, M_ITHREAD); kthread_exit(); } /* * Service interrupts. If another interrupt arrives while * we are running, it will set it_need to note that we * should make another pass. */ while (ithd->it_need) { /* * This might need a full read and write barrier * to make sure that this write posts before any * of the memory or device accesses in the * handlers. */ atomic_store_rel_int(&ithd->it_need, 0); if (priv) priv_ithread_execute_handler(p, ih); else ithread_execute_handlers(p, ie); } WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); mtx_assert(&Giant, MA_NOTOWNED); /* * Processed all our interrupts. Now get the sched * lock. This may take a while and it_need may get * set again, so we have to check it again. */ thread_lock(td); if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { TD_SET_IWAIT(td); ie->ie_count = 0; mi_switch(SW_VOL, NULL); } + +#ifdef SMP + /* + * Ensure we are bound to the correct CPU. We can't + * move ithreads until SMP is running, however, so just + * leave interrupts on the boot CPU during boot. + */ + if (!priv && ie->ie_cpu != cpu && smp_started) { + cpu = ie->ie_cpu; + if (cpu == NOCPU) + sched_unbind(td); + else + sched_bind(td, cpu); + } +#endif thread_unlock(td); } } /* * Main loop for interrupt filter. * * Some architectures (i386, amd64 and arm) require the optional frame * parameter, and use it as the main argument for fast handler execution * when ih_argument == NULL. * * Return value: * o FILTER_STRAY: No filter recognized the event, and no * filter-less handler is registered on this * line. * o FILTER_HANDLED: A filter claimed the event and served it. * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at * least one filter-less handler on this line. * o FILTER_HANDLED | * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for * scheduling the per-handler ithread.
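The return-value contract documented above composes, since FILTER_HANDLED and FILTER_SCHEDULE_THREAD are independent bits: a filter can claim the event and still ask for its private ithread. A sketch of one filter exercising all three outcomes (all bar_* names and the BAR_HEAVY_WORK bit are hypothetical):

#include <sys/param.h>
#include <sys/bus.h>

static uint32_t bar_read_status(void *arg);	/* hypothetical */
static void bar_clear(void *arg, uint32_t st);	/* hypothetical */
#define	BAR_HEAVY_WORK	0x0001			/* hypothetical bit */

static int
bar_filter(void *arg)
{
	uint32_t status;

	status = bar_read_status(arg);
	if (status == 0)
		return (FILTER_STRAY);		/* not our interrupt */
	bar_clear(arg, status);
	if (status & BAR_HEAVY_WORK)
		/* Claimed, but also wake the per-handler ithread. */
		return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
	return (FILTER_HANDLED);		/* fully served in the filter */
}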
* * In case an ithread has to be scheduled, in *ithd there will be a * pointer to a struct intr_thread containing the thread to be * scheduled. */ int intr_filter_loop(struct intr_event *ie, struct trapframe *frame, struct intr_thread **ithd) { struct intr_handler *ih; void *arg; int ret, thread_only; ret = 0; thread_only = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { /* * Execute fast interrupt handlers directly. * To support clock handlers, if a handler registers * with a NULL argument, then we pass it a pointer to * a trapframe as its argument. */ arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, ih->ih_filter, ih->ih_handler, arg, ih->ih_name); if (ih->ih_filter != NULL) ret = ih->ih_filter(arg); else { thread_only = 1; continue; } if (ret & FILTER_STRAY) continue; else { *ithd = ih->ih_thread; return (ret); } } /* * No filters handled the interrupt and we have at least * one handler without a filter. In this case, we schedule * all of the filter-less handlers to run in the ithread. */ if (thread_only) { *ithd = ie->ie_thread; return (FILTER_SCHEDULE_THREAD); } return (FILTER_STRAY); } /* * Main interrupt handling body. * * Input: * o ie: the event connected to this interrupt. * o frame: some archs (i.e. i386) pass a frame to some handlers as their main argument. * Return value: * o 0: everything ok. * o EINVAL: stray interrupt. */ int intr_event_handle(struct intr_event *ie, struct trapframe *frame) { struct intr_thread *ithd; struct thread *td; int thread; ithd = NULL; td = curthread; if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) return (EINVAL); td->td_intr_nesting_level++; thread = 0; critical_enter(); thread = intr_filter_loop(ie, frame, &ithd); /* * If the interrupt was fully served, send it an EOI but leave * it unmasked. Otherwise, mask the source as well as sending * it an EOI. */ if (thread & FILTER_HANDLED) { if (ie->ie_eoi != NULL) ie->ie_eoi(ie->ie_source); } else { if (ie->ie_disab != NULL) ie->ie_disab(ie->ie_source); } critical_exit(); /* Interrupt storm logic */ if (thread & FILTER_STRAY) { ie->ie_count++; if (ie->ie_count < intr_storm_threshold) printf("Interrupt stray detection not present\n"); } /* Schedule an ithread if needed.
*/ if (thread & FILTER_SCHEDULE_THREAD) { if (intr_event_schedule_thread(ie, ithd) != 0) panic("%s: impossible stray interrupt", __func__); } td->td_intr_nesting_level--; return (0); } #endif #ifdef DDB /* * Dump details about an interrupt handler */ static void db_dump_intrhand(struct intr_handler *ih) { int comma; db_printf("\t%-10s ", ih->ih_name); switch (ih->ih_pri) { case PI_REALTIME: db_printf("CLK "); break; case PI_AV: db_printf("AV "); break; case PI_TTYHIGH: case PI_TTYLOW: db_printf("TTY "); break; case PI_TAPE: db_printf("TAPE"); break; case PI_NET: db_printf("NET "); break; case PI_DISK: case PI_DISKLOW: db_printf("DISK"); break; case PI_DULL: db_printf("DULL"); break; default: if (ih->ih_pri >= PI_SOFT) db_printf("SWI "); else db_printf("%4u", ih->ih_pri); break; } db_printf(" "); db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); db_printf("(%p)", ih->ih_argument); if (ih->ih_need || (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | IH_MPSAFE)) != 0) { db_printf(" {"); comma = 0; if (ih->ih_flags & IH_EXCLUSIVE) { if (comma) db_printf(", "); db_printf("EXCL"); comma = 1; } if (ih->ih_flags & IH_ENTROPY) { if (comma) db_printf(", "); db_printf("ENTROPY"); comma = 1; } if (ih->ih_flags & IH_DEAD) { if (comma) db_printf(", "); db_printf("DEAD"); comma = 1; } if (ih->ih_flags & IH_MPSAFE) { if (comma) db_printf(", "); db_printf("MPSAFE"); comma = 1; } if (ih->ih_need) { if (comma) db_printf(", "); db_printf("NEED"); } db_printf("}"); } db_printf("\n"); } /* * Dump details about a event. */ void db_dump_intr_event(struct intr_event *ie, int handlers) { struct intr_handler *ih; struct intr_thread *it; int comma; db_printf("%s ", ie->ie_fullname); it = ie->ie_thread; if (it != NULL) db_printf("(pid %d)", it->it_thread->td_proc->p_pid); else db_printf("(no thread)"); + if (ie->ie_cpu != NOCPU) + db_printf(" (CPU %d)", ie->ie_cpu); if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || (it != NULL && it->it_need)) { db_printf(" {"); comma = 0; if (ie->ie_flags & IE_SOFT) { db_printf("SOFT"); comma = 1; } if (ie->ie_flags & IE_ENTROPY) { if (comma) db_printf(", "); db_printf("ENTROPY"); comma = 1; } if (ie->ie_flags & IE_ADDING_THREAD) { if (comma) db_printf(", "); db_printf("ADDING_THREAD"); comma = 1; } if (it != NULL && it->it_need) { if (comma) db_printf(", "); db_printf("NEED"); } db_printf("}"); } db_printf("\n"); if (handlers) TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) db_dump_intrhand(ih); } /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(intr, db_show_intr) { struct intr_event *ie; int all, verbose; verbose = index(modif, 'v') != NULL; all = index(modif, 'a') != NULL; TAILQ_FOREACH(ie, &event_list, ie_list) { if (!all && TAILQ_EMPTY(&ie->ie_handlers)) continue; db_dump_intr_event(ie, verbose); if (db_pager_quit) break; } } #endif /* DDB */ /* * Start standard software interrupt threads */ static void start_softintr(void *dummy) { struct proc *p; if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, INTR_MPSAFE, &softclock_ih) || swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) panic("died while creating standard software ithreads"); p = clk_intr_event->ie_thread->it_thread->td_proc; PROC_LOCK(p); p->p_flag |= P_NOLOAD; PROC_UNLOCK(p); } SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL) /* * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. * The data for this machine dependent, and the declarations are in machine * dependent code. 
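The hw.intrnames and hw.intrcnt sysctls exported just below are what systat(1) and vmstat(8) consume. As the comment notes, the data layout is machine dependent; the sketch below is a userland illustration rather than part of this change, and it assumes the NUL-separated name layout that the DDB "show intrcnt" loop at the end of this file walks:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	u_long *cnt;
	char *names, *cp;
	size_t cntsz, namesz, i;

	/* Size the buffers first, then fetch both arrays. */
	if (sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", NULL, &namesz, NULL, 0) == -1)
		return (1);
	cnt = malloc(cntsz);
	names = malloc(namesz);
	if (cnt == NULL || names == NULL)
		return (1);
	if (sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", names, &namesz, NULL, 0) == -1)
		return (1);
	/* Walk the NUL-separated names in step with the counters. */
	cp = names;
	for (i = 0; i < cntsz / sizeof(u_long) && cp < names + namesz; i++) {
		if (*cp != '\0' && cnt[i] != 0)
			printf("%s\t%lu\n", cp, cnt[i]);
		cp += strlen(cp) + 1;
	}
	free(cnt);
	free(names);
	return (0);
}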
The layout of intrnames and intrcnt however is machine * independent. * * We do not know the length of intrcnt and intrnames at compile time, so * calculate things at run time. */ static int sysctl_intrnames(SYSCTL_HANDLER_ARGS) { return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, req)); } SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, sysctl_intrnames, "", "Interrupt Names"); static int sysctl_intrcnt(SYSCTL_HANDLER_ARGS) { return (sysctl_handle_opaque(oidp, intrcnt, (char *)eintrcnt - (char *)intrcnt, req)); } SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); #ifdef DDB /* * DDB command to dump the interrupt statistics. */ DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) { u_long *i; char *cp; cp = intrnames; for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { if (*cp == '\0') break; if (*i != 0) db_printf("%s\t%lu\n", cp, *i); cp += strlen(cp) + 1; } } #endif diff --git a/sys/powerpc/powerpc/intr_machdep.c b/sys/powerpc/powerpc/intr_machdep.c index 83805cfee1e0..e05195fedb15 100644 --- a/sys/powerpc/powerpc/intr_machdep.c +++ b/sys/powerpc/powerpc/intr_machdep.c @@ -1,355 +1,355 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #define MAX_STRAY_LOG 5 MALLOC_DEFINE(M_INTR, "intr", "interrupt handler data"); struct powerpc_intr { struct intr_event *event; long *cntp; enum intr_trigger trig; enum intr_polarity pol; u_int irq; u_int vector; }; static struct powerpc_intr *powerpc_intrs[INTR_VECTORS]; static u_int nvectors; /* Allocated vectors */ static u_int stray_count; device_t pic; static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static struct powerpc_intr * intr_lookup(u_int irq) { char intrname[8]; struct powerpc_intr *i, *iscan; int vector; for (vector = 0; vector < nvectors; vector++) { i = powerpc_intrs[vector]; if (i != NULL && i->irq == irq) return (i); } i = malloc(sizeof(*i), M_INTR, M_NOWAIT); if (i == NULL) return (NULL); i->event = NULL; i->cntp = NULL; i->trig = INTR_TRIGGER_CONFORM; i->pol = INTR_POLARITY_CONFORM; i->irq = irq; i->vector = -1; /* XXX LOCK */ for (vector = 0; vector < INTR_VECTORS && vector <= nvectors; vector++) { iscan = powerpc_intrs[vector]; if (iscan != NULL && iscan->irq == irq) break; if (iscan == NULL && i->vector == -1) i->vector = vector; iscan = NULL; } if (iscan == NULL && i->vector != -1) { powerpc_intrs[i->vector] = i; sprintf(intrname, "irq%u:", i->irq); intrcnt_setname(intrname, i->vector); nvectors++; } /* XXX UNLOCK */ if (iscan != NULL || i->vector == -1) { free(i, M_INTR); i = iscan; } return (i); } #ifdef INTR_FILTER static void powerpc_intr_eoi(void *arg) { u_int irq = (uintptr_t)arg; PIC_EOI(pic, irq); } static void powerpc_intr_mask(void *arg) { u_int irq = (uintptr_t)arg; PIC_MASK(pic, irq); } #endif static void powerpc_intr_unmask(void *arg) { u_int irq = (uintptr_t)arg; PIC_UNMASK(pic, irq); } void powerpc_register_pic(device_t dev, u_int ipi) { pic = dev; } int powerpc_enable_intr(void) { struct powerpc_intr *i; int vector; for (vector = 0; vector < nvectors; vector++) { i = powerpc_intrs[vector]; if (i == NULL) continue; if (i->trig != INTR_TRIGGER_CONFORM || i->pol != INTR_POLARITY_CONFORM) PIC_CONFIG(pic, i->irq, i->trig, i->pol); if (i->event != NULL) PIC_ENABLE(pic, i->irq, vector); } return (0); } int powerpc_setup_intr(const char *name, u_int irq, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct powerpc_intr *i; int error; i = intr_lookup(irq); if (i == NULL) return (ENOMEM); if (i->event == NULL) { error = intr_event_create(&i->event, (void *)irq, 0, 
powerpc_intr_unmask, #ifdef INTR_FILTER powerpc_intr_eoi, powerpc_intr_mask, #endif - "irq%u:", irq); + NULL, "irq%u:", irq); if (error) return (error); i->cntp = &intrcnt[i->vector]; if (!cold) PIC_ENABLE(pic, i->irq, i->vector); } error = intr_event_add_handler(i->event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); intrcnt_setname(i->event->ie_fullname, i->vector); return (error); } int powerpc_teardown_intr(void *cookie) { return (intr_event_remove_handler(cookie)); } int powerpc_config_intr(int irq, enum intr_trigger trig, enum intr_polarity pol) { struct powerpc_intr *i; if (trig == INTR_TRIGGER_CONFORM && pol == INTR_POLARITY_CONFORM) return (0); i = intr_lookup(irq); if (i == NULL) return (ENOMEM); i->trig = trig; i->pol = pol; if (!cold) PIC_CONFIG(pic, irq, trig, pol); return (0); } void powerpc_dispatch_intr(u_int vector, struct trapframe *tf) { struct powerpc_intr *i; struct intr_event *ie; #ifndef INTR_FILTER struct intr_handler *ih; int error, sched, ret; #endif i = powerpc_intrs[vector]; if (i == NULL) goto stray; (*i->cntp)++; ie = i->event; KASSERT(ie != NULL, ("%s: interrupt without an event", __func__)); #ifdef INTR_FILTER if (intr_event_handle(ie, tf) != 0) { PIC_MASK(pic, i->irq); log(LOG_ERR, "stray irq%u\n", i->irq); } #else if (TAILQ_EMPTY(&ie->ie_handlers)) goto stray; /* * Execute all fast interrupt handlers directly without Giant. Note * that this means that any fast interrupt handler must be MP safe. */ ret = 0; sched = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { sched = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument, ih->ih_name); ret = ih->ih_filter(ih->ih_argument); /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!sched) { if (ret == FILTER_SCHEDULE_THREAD) sched = 1; } } if (sched) { PIC_MASK(pic, i->irq); error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("%s: impossible stray interrupt", __func__)); } else PIC_EOI(pic, i->irq); critical_exit(); #endif return; stray: stray_count++; if (stray_count <= MAX_STRAY_LOG) { printf("stray irq %d\n", i->irq); if (stray_count >= MAX_STRAY_LOG) { printf("got %d stray interrupts, not logging anymore\n", MAX_STRAY_LOG); } } if (i != NULL) PIC_MASK(pic, i->irq); } diff --git a/sys/sparc64/sparc64/intr_machdep.c b/sys/sparc64/sparc64/intr_machdep.c index 735b4c0f230f..091ed90754bb 100644 --- a/sys/sparc64/sparc64/intr_machdep.c +++ b/sys/sparc64/sparc64/intr_machdep.c @@ -1,436 +1,436 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_STRAY_LOG 5 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector)); ih_func_t *intr_handlers[PIL_MAX]; uint16_t pil_countp[PIL_MAX]; struct intr_vector intr_vectors[IV_MAX]; uint16_t intr_countp[IV_MAX]; static u_long intr_stray_count[IV_MAX]; static const char *pil_names[] = { "stray", "low", /* PIL_LOW */ "ithrd", /* PIL_ITHREAD */ "rndzvs", /* PIL_RENDEZVOUS */ "ast", /* PIL_AST */ "stop", /* PIL_STOP */ "stray", "stray", "stray", "stray", "stray", "stray", "stray", "fast", /* PIL_FAST */ "tick", /* PIL_TICK */ }; /* protect the intr_vectors table */ static struct mtx intr_table_lock; static void intr_enable_eoi(void *); static void intr_execute_handlers(void *); static void intr_stray_level(struct trapframe *); static void intr_stray_vector(void *); static int intrcnt_setname(const char *, int); static void intrcnt_updatename(int, const char *, int); /* * not MPSAFE */ static void intrcnt_updatename(int vec, const char *name, int ispil) { static int intrcnt_index, stray_pil_index, stray_vec_index; int name_index; if (intrnames[0] == '\0') { /* for bitbucket */ if (bootverbose) printf("initalizing intr_countp\n"); intrcnt_setname("???", intrcnt_index++); stray_vec_index = intrcnt_index++; intrcnt_setname("stray", stray_vec_index); for (name_index = 0; name_index < IV_MAX; name_index++) intr_countp[name_index] = stray_vec_index; stray_pil_index = intrcnt_index++; intrcnt_setname("pil", stray_pil_index); for (name_index = 0; name_index < PIL_MAX; name_index++) pil_countp[name_index] = stray_pil_index; } if (name == NULL) name = "???"; if (!ispil && intr_countp[vec] != stray_vec_index) name_index = intr_countp[vec]; else if (ispil && pil_countp[vec] != stray_pil_index) name_index = pil_countp[vec]; else name_index = intrcnt_index++; if (intrcnt_setname(name, name_index)) name_index = 0; if (!ispil) intr_countp[vec] = name_index; else pil_countp[vec] = name_index; } static int intrcnt_setname(const char *name, int index) { if (intrnames + (MAXCOMLEN + 1) * index >= eintrnames) return (E2BIG); snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); return (0); } void intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva) { char pilname[MAXCOMLEN + 1]; u_long ps; ps = intr_disable(); if (vec != -1) { intr_vectors[vec].iv_func = ivf; intr_vectors[vec].iv_arg = iva; intr_vectors[vec].iv_pri = pri; intr_vectors[vec].iv_vec = vec; } snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]); intrcnt_updatename(pri, pilname, 1); intr_handlers[pri] = ihf; intr_restore(ps); } static void intr_stray_level(struct trapframe *tf) { printf("stray level interrupt %ld\n", tf->tf_level); } static void intr_stray_vector(void *cookie) { struct intr_vector *iv; iv = cookie; if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) { printf("stray vector interrupt %d\n", iv->iv_vec); intr_stray_count[iv->iv_vec]++; if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG) printf("got %d stray interrupt %d's: not logging " "anymore\n", MAX_STRAY_LOG, iv->iv_vec); } } void intr_init1() { int i; /* Mark all interrupts as being stray. 
*/ for (i = 0; i < PIL_MAX; i++) intr_handlers[i] = intr_stray_level; for (i = 0; i < IV_MAX; i++) { intr_vectors[i].iv_func = intr_stray_vector; intr_vectors[i].iv_arg = &intr_vectors[i]; intr_vectors[i].iv_pri = PIL_LOW; intr_vectors[i].iv_vec = i; intr_vectors[i].iv_refcnt = 0; } intr_handlers[PIL_LOW] = intr_fast; } void intr_init2() { mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN); } static void intr_enable_eoi(void *arg) { struct intr_vector *iv; const struct intr_controller *ic; iv = arg; ic = iv->iv_ic; ic->ic_enable(iv); ic->ic_eoi(iv); } static void intr_execute_handlers(void *cookie) { struct intr_vector *iv; #ifndef INTR_FILTER struct intr_event *ie; struct intr_handler *ih; int error, thread, ret; #endif iv = cookie; #ifndef INTR_FILTER ie = iv->iv_event; if (iv->iv_ic == NULL || ie == NULL) { intr_stray_vector(iv); return; } /* Execute fast interrupt handlers directly. */ ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL); CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__, ih->ih_filter, ih->ih_argument); ret = ih->ih_filter(ih->ih_argument); /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } if (!thread) intr_enable_eoi(iv); /* Schedule a heavyweight interrupt process. */ if (thread) error = intr_event_schedule_thread(ie); else if (TAILQ_EMPTY(&ie->ie_handlers)) error = EINVAL; else error = 0; critical_exit(); if (error == EINVAL) #else if (intr_event_handle(iv->iv_event, NULL) != 0) #endif intr_stray_vector(iv); } int intr_controller_register(int vec, const struct intr_controller *ic, void *icarg) { struct intr_event *ie; struct intr_vector *iv; int error; iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); if (ie != NULL) return (EEXIST); /* * Testing shows that at least with the interrupt controllers of * Psycho and Schizo bridges enabling an interrupt doesn't cause * an outstanding interrupt to be issued to the CPU. Thus we can't * use a function doing disable+EOI for the "disable" pointer as * done on other architectures because this would lead to a lost * interrupt if it triggers while we are still processing the * previous one. Instead we use an enable+EOI approach because as * outlined in the Tomatillo documentation clearing an interrupt * in the interrupt controller causes it to be (re)issued to the * CPU as long as the source of a level sensitive interrupt is * not cleared. 
*/ error = intr_event_create(&ie, iv, 0, intr_enable_eoi, #ifdef INTR_FILTER - ic->ic_eoi, ic->ic_disable, "vec%d:", vec); + ic->ic_eoi, ic->ic_disable, NULL, "vec%d:", vec); #else - "vec%d:", vec); + NULL, "vec%d:", vec); #endif if (error != 0) return (error); mtx_lock_spin(&intr_table_lock); if (iv->iv_event != NULL) { mtx_unlock_spin(&intr_table_lock); intr_event_destroy(ie); return (EEXIST); } iv->iv_ic = ic; iv->iv_icarg = icarg; iv->iv_event = ie; iv->iv_mid = PCPU_GET(mid); mtx_unlock_spin(&intr_table_lock); return (0); } int inthand_add(const char *name, int vec, driver_filter_t *filt, driver_intr_t *handler, void *arg, int flags, void **cookiep) { const struct intr_controller *ic; struct intr_event *ie; struct intr_handler *ih; struct intr_vector *iv; int error, fast; iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); ic = iv->iv_ic; ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); if (ic == NULL || ie == NULL) return (EINVAL); error = intr_event_add_handler(ie, name, filt, handler, arg, intr_priority(flags), flags, cookiep); if (error != 0) return (error); mtx_lock_spin(&intr_table_lock); /* Disable the interrupt while we fiddle with it. */ ic->ic_disable(iv); iv->iv_refcnt++; if (iv->iv_refcnt == 1) intr_setup(filt != NULL ? PIL_FAST : PIL_ITHREAD, intr_fast, vec, intr_execute_handlers, iv); else if (filt != NULL) { /* * Check if we need to upgrade from PIL_ITHREAD to PIL_FAST. * Given that apart from the on-board SCCs and UARTs shared * interrupts are rather uncommon on sparc64 this sould be * pretty rare in practice. */ fast = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter != NULL && ih->ih_filter != filt) { fast = 1; break; } } if (fast == 0) intr_setup(PIL_FAST, intr_fast, vec, intr_execute_handlers, iv); } intr_stray_count[vec] = 0; intrcnt_updatename(vec, ie->ie_fullname, 0); /* Ensure the interrupt is cleared, it might have triggered before. */ intr_enable_eoi(iv); mtx_unlock_spin(&intr_table_lock); return (0); } int inthand_remove(int vec, void *cookie) { struct intr_vector *iv; int error; error = intr_event_remove_handler(cookie); if (error == 0) { /* * XXX: maybe this should be done regardless of whether * intr_event_remove_handler() succeeded? */ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); iv->iv_refcnt--; if (iv->iv_refcnt == 0) { /* * Don't disable the interrupt for now, so that * stray interrupts get detected... */ intr_setup(PIL_LOW, intr_fast, vec, intr_stray_vector, iv); } mtx_unlock_spin(&intr_table_lock); } return (error); } diff --git a/sys/sun4v/sun4v/intr_machdep.c b/sys/sun4v/sun4v/intr_machdep.c index 6b9f1fb0a534..80e9d902aa41 100644 --- a/sys/sun4v/sun4v/intr_machdep.c +++ b/sys/sun4v/sun4v/intr_machdep.c @@ -1,506 +1,506 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * from: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PANIC_IF(exp) if (unlikely(exp)) {panic("%s: %s:%d", #exp, __FILE__, __LINE__);} #define MAX_STRAY_LOG 5 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector)); ih_func_t *intr_handlers[PIL_MAX]; uint16_t pil_countp[PIL_MAX]; struct intr_vector intr_vectors[IV_MAX]; uint16_t intr_countp[IV_MAX]; static u_long intr_stray_count[IV_MAX]; struct ithread_vector_handler { iv_func_t *ivh_handler; void *ivh_arg; u_int ivh_vec; }; static char *pil_names[] = { "stray", "low", /* PIL_LOW */ "ithrd", /* PIL_ITHREAD */ "rndzvs", /* PIL_RENDEZVOUS */ "ast", /* PIL_AST */ "stop", /* PIL_STOP */ "preempt", /* PIL_PREEMPT */ "stray", "stray", "stray", "stray", "stray", "stray", "fast", /* PIL_FAST */ "tick", /* PIL_TICK */ }; /* * XXX SUN4V_FIXME - the queue size values should * really be calculated based on the size of the partition * */ int cpu_q_entries = 128; int dev_q_entries = 128; static vm_offset_t *mondo_data_array; static vm_offset_t *cpu_list_array; static vm_offset_t *cpu_q_array; static vm_offset_t *dev_q_array; static vm_offset_t *rq_array; static vm_offset_t *nrq_array; static int cpu_list_size; /* protect the intr_vectors table */ static struct mtx intr_table_lock; static void intr_execute_handlers(void *); static void intr_stray_level(struct trapframe *); static void intr_stray_vector(void *); static int intrcnt_setname(const char *, int); static void intrcnt_updatename(int, const char *, int); static void cpu_intrq_alloc(void); /* * not MPSAFE */ static void intrcnt_updatename(int vec, const char *name, int ispil) { static int intrcnt_index, stray_pil_index, stray_vec_index; int name_index; if (intrnames[0] == '\0') { /* for bitbucket */ if (bootverbose) printf("initializing intr_countp\n"); intrcnt_setname("???", intrcnt_index++); stray_vec_index = intrcnt_index++; intrcnt_setname("stray", stray_vec_index); for (name_index = 0; name_index < IV_MAX; name_index++) intr_countp[name_index] = stray_vec_index; stray_pil_index = intrcnt_index++; intrcnt_setname("pil", stray_pil_index); for (name_index = 0; name_index < PIL_MAX; name_index++) pil_countp[name_index] = stray_pil_index; } if (name == NULL) name = "???"; if (!ispil && intr_countp[vec] != stray_vec_index) name_index = intr_countp[vec]; else if (ispil && pil_countp[vec] != stray_pil_index) name_index = pil_countp[vec]; else name_index = intrcnt_index++; if (intrcnt_setname(name, name_index)) name_index = 0; if (!ispil) intr_countp[vec] = name_index; else pil_countp[vec] = name_index; } static int intrcnt_setname(const char *name, int index) { if (intrnames + (MAXCOMLEN + 1) * index >= eintrnames) return (E2BIG); snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); return (0); } void intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva) { char pilname[MAXCOMLEN + 1]; u_long ps; ps = intr_disable_all(); if (vec != -1) { intr_vectors[vec].iv_func = ivf; intr_vectors[vec].iv_arg = iva; intr_vectors[vec].iv_pri = pri; intr_vectors[vec].iv_vec = vec; } snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]); intrcnt_updatename(pri, pilname, 1); intr_handlers[pri] = ihf; intr_restore_all(ps); } static void intr_stray_level(struct
trapframe *tf) { printf("stray level interrupt - pil=%ld\n", tf->tf_pil); } static void intr_stray_vector(void *cookie) { struct intr_vector *iv; iv = cookie; if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) { printf("stray vector interrupt %d\n", iv->iv_vec); intr_stray_count[iv->iv_vec]++; if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG) printf("got %d stray interrupt %d's: not logging " "anymore\n", MAX_STRAY_LOG, iv->iv_vec); } } static void intr_init(void) { int i; /* Mark all interrupts as being stray. */ for (i = 0; i < PIL_MAX; i++) intr_handlers[i] = intr_stray_level; for (i = 0; i < IV_MAX; i++) { intr_vectors[i].iv_func = intr_stray_vector; intr_vectors[i].iv_arg = &intr_vectors[i]; intr_vectors[i].iv_pri = PIL_LOW; intr_vectors[i].iv_vec = i; } intr_handlers[PIL_LOW] = intr_fast; #ifdef SMP intr_handlers[PIL_AST] = cpu_ipi_ast; intr_handlers[PIL_RENDEZVOUS] = (ih_func_t *)smp_rendezvous_action; intr_handlers[PIL_STOP]= cpu_ipi_stop; intr_handlers[PIL_PREEMPT]= cpu_ipi_preempt; #endif mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN); cpu_intrq_alloc(); cpu_intrq_init(); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); static void intr_execute_handlers(void *cookie) { struct intr_vector *iv; struct intr_event *ie; struct intr_handler *ih; int fast, thread, ret; iv = cookie; ie = iv->iv_event; if (ie == NULL) { intr_stray_vector(iv); return; } ret = 0; fast = thread = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL); CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__, ih->ih_filter, ih->ih_argument); ret = ih->ih_filter(ih->ih_argument); fast = 1; /* * Wrapper handler special case: see * i386/intr_machdep.c::intr_execute_handlers() */ if (!thread) { if (ret == FILTER_SCHEDULE_THREAD) thread = 1; } } /* Schedule a heavyweight interrupt process. */ if (thread) intr_event_schedule_thread(ie); else if (TAILQ_EMPTY(&ie->ie_handlers)) intr_stray_vector(iv); if (fast) hv_intr_setstate(iv->iv_vec, HV_INTR_IDLE_STATE); } static void ithread_wrapper(void *arg) { struct ithread_vector_handler *ivh = (struct ithread_vector_handler *)arg; ivh->ivh_handler(ivh->ivh_arg); /* re-enable interrupt */ hv_intr_setstate(ivh->ivh_vec, HV_INTR_IDLE_STATE); } int inthand_add(const char *name, int vec, driver_filter_t *filt, void (*handler)(void *), void *arg, int flags, void **cookiep) { struct intr_vector *iv; struct intr_event *ie; /* descriptor for the IRQ */ struct intr_event *orphan; struct ithread_vector_handler *ivh; int errcode, pil; if (filt != NULL && handler != NULL) { printf("both filt and handler set is not valid\n"); return (EINVAL); } /* * Work around a race where more than one CPU may be registering * handlers on the same IRQ at the same time. 
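* To that end the code below creates the event first, re-checks * iv->iv_event under the spin lock, and destroys the orphan event if * another CPU won the race.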
*/ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); if (ie == NULL) { errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL, - "vec%d:", vec); + NULL, "vec%d:", vec); if (errcode) return (errcode); mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) { iv->iv_event = ie; mtx_unlock_spin(&intr_table_lock); } else { orphan = ie; ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); intr_event_destroy(orphan); } } if (filt == NULL) { ivh = (struct ithread_vector_handler *) malloc(sizeof(struct ithread_vector_handler), M_DEVBUF, M_WAITOK); ivh->ivh_handler = (driver_intr_t *)handler; ivh->ivh_arg = arg; ivh->ivh_vec = vec; errcode = intr_event_add_handler(ie, name, NULL, ithread_wrapper, ivh, intr_priority(flags), flags, cookiep); } else { ivh = NULL; errcode = intr_event_add_handler(ie, name, filt, NULL, arg, intr_priority(flags), flags, cookiep); } if (errcode) { if (ivh) free(ivh, M_DEVBUF); return (errcode); } pil = (filt != NULL) ? PIL_FAST : PIL_ITHREAD; intr_setup(pil, intr_fast, vec, intr_execute_handlers, iv); intr_stray_count[vec] = 0; intrcnt_updatename(vec, ie->ie_fullname, 0); return (0); } int inthand_remove(int vec, void *cookie) { struct intr_vector *iv; int error; error = intr_event_remove_handler(cookie); if (error == 0) { /* * XXX: maybe this should be done regardless of whether * intr_event_remove_handler() succeeded? * XXX: aren't the PIL's backwards below? */ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) intr_setup(PIL_ITHREAD, intr_fast, vec, intr_stray_vector, iv); else intr_setup(PIL_LOW, intr_fast, vec, intr_execute_handlers, iv); mtx_unlock_spin(&intr_table_lock); } return (error); } /* * Allocate and register intrq fields */ static void cpu_intrq_alloc(void) { mondo_data_array = malloc(INTR_REPORT_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(mondo_data_array == NULL); cpu_list_size = CPU_LIST_SIZE > INTR_REPORT_SIZE ? 
CPU_LIST_SIZE : INTR_REPORT_SIZE; cpu_list_array = malloc(cpu_list_size*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(cpu_list_array == NULL); cpu_q_array = malloc(INTR_CPU_Q_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(cpu_q_array == NULL); dev_q_array = malloc(INTR_DEV_Q_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(dev_q_array == NULL); rq_array = malloc(2*CPU_RQ_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(rq_array == NULL); nrq_array = malloc(2*CPU_NRQ_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(nrq_array == NULL); } void cpu_intrq_init() { uint64_t error; pcpup->pc_mondo_data = (vm_offset_t *) ((char *)mondo_data_array + curcpu*INTR_REPORT_SIZE); pcpup->pc_mondo_data_ra = vtophys(pcpup->pc_mondo_data); pcpup->pc_cpu_q = (vm_offset_t *)((char *)cpu_q_array + curcpu*INTR_CPU_Q_SIZE); pcpup->pc_cpu_q_ra = vtophys(pcpup->pc_cpu_q); pcpup->pc_cpu_q_size = INTR_CPU_Q_SIZE; pcpup->pc_dev_q = (vm_offset_t *)((char *)dev_q_array + curcpu*INTR_DEV_Q_SIZE); pcpup->pc_dev_q_ra = vtophys(pcpup->pc_dev_q); pcpup->pc_dev_q_size = INTR_DEV_Q_SIZE; pcpup->pc_rq = (vm_offset_t *)((char *)rq_array + curcpu*2*CPU_RQ_SIZE); pcpup->pc_rq_ra = vtophys(pcpup->pc_rq); pcpup->pc_rq_size = CPU_RQ_SIZE; pcpup->pc_nrq = (vm_offset_t *)((char *)nrq_array + curcpu*2*CPU_NRQ_SIZE); pcpup->pc_nrq_ra = vtophys(pcpup->pc_nrq); pcpup->pc_nrq_size = CPU_NRQ_SIZE; error = hv_cpu_qconf(Q(CPU_MONDO_QUEUE_HEAD), pcpup->pc_cpu_q_ra, cpu_q_entries); if (error != H_EOK) panic("cpu_mondo queue configuration failed: %lu va=%p ra=0x%lx", error, pcpup->pc_cpu_q, pcpup->pc_cpu_q_ra); error = hv_cpu_qconf(Q(DEV_MONDO_QUEUE_HEAD), pcpup->pc_dev_q_ra, dev_q_entries); if (error != H_EOK) panic("dev_mondo queue configuration failed: %lu", error); error = hv_cpu_qconf(Q(RESUMABLE_ERROR_QUEUE_HEAD), pcpup->pc_rq_ra, CPU_RQ_ENTRIES); if (error != H_EOK) panic("resumable error queue configuration failed: %lu", error); error = hv_cpu_qconf(Q(NONRESUMABLE_ERROR_QUEUE_HEAD), pcpup->pc_nrq_ra, CPU_NRQ_ENTRIES); if (error != H_EOK) panic("non-resumable error queue configuration failed: %lu", error); } diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h index b110374a6b28..54cd0e4362a9 100644 --- a/sys/sys/interrupt.h +++ b/sys/sys/interrupt.h @@ -1,155 +1,160 @@ /*- * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_INTERRUPT_H_ #define _SYS_INTERRUPT_H_ #include #include struct intr_event; struct intr_thread; struct trapframe; /* * Describe a hardware interrupt handler. * * Multiple interrupt handlers for a specific event can be chained * together. */ struct intr_handler { driver_filter_t *ih_filter; /* Filter function. */ driver_intr_t *ih_handler; /* Handler function. */ void *ih_argument; /* Argument to pass to handler. */ int ih_flags; const char *ih_name; /* Name of handler. */ struct intr_event *ih_event; /* Event we are connected to. */ int ih_need; /* Needs service. */ TAILQ_ENTRY(intr_handler) ih_next; /* Next handler for this event. */ u_char ih_pri; /* Priority of this handler. */ struct intr_thread *ih_thread; /* Ithread for filtered handler. */ }; /* Interrupt handle flags kept in ih_flags */ #define IH_EXCLUSIVE 0x00000002 /* Exclusive interrupt. */ #define IH_ENTROPY 0x00000004 /* Device is a good entropy source. */ #define IH_DEAD 0x00000008 /* Handler should be removed. */ #define IH_MPSAFE 0x80000000 /* Handler does not need Giant. */ /* * Describe an interrupt event. An event holds a list of handlers. */ struct intr_event { TAILQ_ENTRY(intr_event) ie_list; TAILQ_HEAD(, intr_handler) ie_handlers; /* Interrupt handlers. */ char ie_name[MAXCOMLEN]; /* Individual event name. */ char ie_fullname[MAXCOMLEN]; struct mtx ie_lock; void *ie_source; /* Cookie used by MD code. */ struct intr_thread *ie_thread; /* Thread we are connected to. */ void (*ie_enable)(void *); + int (*ie_assign_cpu)(void *, u_char); #ifdef INTR_FILTER void (*ie_eoi)(void *); void (*ie_disab)(void *); #endif int ie_flags; int ie_count; /* Loop counter. */ int ie_warncnt; /* Rate-check interrupt storm warns. */ struct timeval ie_warntm; + u_char ie_cpu; /* CPU this event is bound to. */ }; /* Interrupt event flags kept in ie_flags. */ #define IE_SOFT 0x000001 /* Software interrupt. */ #define IE_ENTROPY 0x000002 /* Interrupt is an entropy source. */ #define IE_ADDING_THREAD 0x000004 /* Currently building an ithread. */ /* Flags to pass to sched_swi. */ #define SWI_DELAY 0x2 /* * Software interrupt numbers in priority order. The priority determines * the priority of the corresponding interrupt thread. */ #define SWI_TTY 0 #define SWI_NET 1 #define SWI_CAMBIO 2 #define SWI_VM 3 #define SWI_CLOCK 4 #define SWI_TQ_FAST 5 #define SWI_TQ 6 #define SWI_TQ_GIANT 6 extern struct intr_event *tty_intr_event; extern struct intr_event *clk_intr_event; extern void *softclock_ih; extern void *vm_ih; /* Counts and names for statistics (defined in MD code). 
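* Each name occupies a fixed-width slot of MAXCOMLEN + 1 bytes in * intrnames, so entry i lives at intrnames + (MAXCOMLEN + 1) * i; see * intrcnt_setname() in the MD files above.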
*/ extern u_long eintrcnt[]; /* end of intrcnt[] */ extern char eintrnames[]; /* end of intrnames[] */ extern u_long intrcnt[]; /* counts for each device and stray */ extern char intrnames[]; /* string table containing device names */ #ifdef DDB void db_dump_intr_event(struct intr_event *ie, int handlers); #endif #ifdef INTR_FILTER int intr_filter_loop(struct intr_event *ie, struct trapframe *frame, struct intr_thread **ithd); int intr_event_handle(struct intr_event *ie, struct trapframe *frame); #endif u_char intr_priority(enum intr_type flags); int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep); +int intr_event_bind(struct intr_event *ie, u_char cpu); #ifndef INTR_FILTER int intr_event_create(struct intr_event **event, void *source, - int flags, void (*enable)(void *), const char *fmt, ...) - __printflike(5, 6); + int flags, void (*enable)(void *), + int (*assign_cpu)(void *, u_char), const char *fmt, ...) + __printflike(6, 7); #else int intr_event_create(struct intr_event **event, void *source, int flags, void (*enable)(void *), void (*eoi)(void *), - void (*disab)(void *), const char *fmt, ...) - __printflike(7, 8); + void (*disab)(void *), int (*assign_cpu)(void *, u_char), + const char *fmt, ...) + __printflike(8, 9); #endif int intr_event_destroy(struct intr_event *ie); int intr_event_remove_handler(void *cookie); #ifndef INTR_FILTER int intr_event_schedule_thread(struct intr_event *ie); #else int intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *ithd); #endif void *intr_handler_source(void *cookie); int swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, void *arg, int pri, enum intr_type flags, void **cookiep); void swi_sched(void *cookie, int flags); int swi_remove(void *cookie); #endif
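
For illustration, a minimal sketch of how the reworked event API fits together, based only on the prototypes above. The callback body, all example_* names, and the CPU number chosen are hypothetical, not part of this change; backends that cannot retarget interrupts pass NULL for assign_cpu, as the sparc64 and sun4v hunks above do.

/*
 * Hypothetical sketch (not part of the patch): an MD backend supplies an
 * assign_cpu callback through the new intr_event_create() signature, and
 * a consumer later pins the event to a CPU with intr_event_bind().
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

static int
example_assign_cpu(void *source, u_char cpu)
{

	/*
	 * A real backend would reprogram its interrupt controller here so
	 * that 'source' is delivered to 'cpu', returning 0 on success or
	 * an errno value (e.g. EOPNOTSUPP if retargeting is unsupported).
	 */
	return (0);
}

static struct intr_event *example_ie;

static int
example_setup(void *source, void (*enable)(void *), int vec)
{
	int error;

	/* Non-INTR_FILTER variant: assign_cpu precedes the format string. */
	error = intr_event_create(&example_ie, source, 0, enable,
	    example_assign_cpu, "vec%d:", vec);
	if (error != 0)
		return (error);

	/* Bind the event to CPU 1 (an arbitrary choice for illustration). */
	return (intr_event_bind(example_ie, 1));
}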