Index: head/sys/amd64/amd64/intr_machdep.c =================================================================== --- head/sys/amd64/amd64/intr_machdep.c (revision 170161) +++ head/sys/amd64/amd64/intr_machdep.c (revision 170162) @@ -1,594 +1,603 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for amd64. For amd64, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. */ #include "opt_atpic.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifndef DEV_ATPIC #include #include #include #include #include #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct sx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; #ifdef INTR_FILTER static void intr_eoi_src(void *arg); static void intr_disab_eoi_src(void *arg); static void intr_event_stray(void *cookie); #endif #ifdef SMP static int assign_cpu; static void intr_assign_next_cpu(struct intsrc *isrc); #endif static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; STAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. 
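 *
 * As an illustrative sketch (hypothetical driver, not part of this
 * change), a PIC driver fills in a struct pic with its method
 * pointers and registers it once at attach time; suspend and resume
 * then reach it through the pic_suspend/pic_resume hooks:
 *
 *	static struct pic my_pic = {
 *		.pic_enable_source = my_pic_enable_source,
 *		.pic_disable_source = my_pic_disable_source,
 *		.pic_eoi_source = my_pic_eoi_source,
 *		.pic_suspend = my_pic_suspend,
 *		.pic_resume = my_pic_resume,
 *	};
 *
 *	if (intr_register_pic(&my_pic) == EBUSY)
 *		printf("my_pic: controller registered twice\n");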
*/ int intr_register_pic(struct pic *pic) { int error; sx_xlock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { STAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } sx_xunlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. */ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); #ifdef INTR_FILTER error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector); #else error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector); #endif if (error) return (error); sx_xlock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { sx_xunlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; sx_xunlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { sx_xlock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { #ifdef SMP if (assign_cpu) intr_assign_next_cpu(isrc); #endif isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } sx_xunlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; int error; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { sx_xlock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); sx_xunlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } #ifdef INTR_FILTER void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; int vector; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_LAZY_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; if (intr_event_handle(ie, frame) != 0) intr_event_stray(isrc); } static void intr_event_stray(void *cookie) { struct intsrc *isrc; isrc = cookie; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. 
*/ isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc)); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", isrc->is_pic->pic_vector(isrc)); } static void intr_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_eoi_source(isrc); } static void intr_disab_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } #else void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; struct intr_handler *ih; - int error, vector, thread; + int error, vector, thread, ret; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_LAZY_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); return; } /* * Execute fast interrupt handlers directly. * To support clock handlers, if a handler registers * with a NULL argument, then we pass it a pointer to * a trapframe as its argument. */ td->td_intr_nesting_level++; + ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument == NULL ? frame : ih->ih_argument, ih->ih_name); if (ih->ih_argument == NULL) - ih->ih_filter(frame); + ret = ih->ih_filter(frame); else - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } /* * If there are any threaded handlers that need to run, * mask the source as well as sending it an EOI. Otherwise, * just send it an EOI but leave it unmasked. */ if (thread) isrc->is_pic->pic_disable_source(isrc, PIC_EOI); else isrc->is_pic->pic_eoi_source(isrc); critical_exit(); /* Schedule the ithread if needed. 
*/ if (thread) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("bad stray interrupt")); } td->td_intr_nesting_level--; } #endif void intr_resume(void) { struct pic *pic; #ifndef DEV_ATPIC atpic_reset(); #endif sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic); } sx_xunlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } sx_xunlock(&intr_table_lock); } static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; STAILQ_INIT(&pics); sx_init(&intr_table_lock, "intr sources"); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL) #ifndef DEV_ATPIC /* Initialize the two 8259A's to a known-good shutdown state. */ void atpic_reset(void) { outb(IO_ICU1, ICW1_RESET | ICW1_IC4); outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS); outb(IO_ICU1 + ICU_IMR_OFFSET, 1 << 2); outb(IO_ICU1 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU1, OCW3_SEL | OCW3_RR); outb(IO_ICU2, ICW1_RESET | ICW1_IC4); outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8); outb(IO_ICU2 + ICU_IMR_OFFSET, 2); outb(IO_ICU2 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU2, OCW3_SEL | OCW3_RR); } #endif #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ /* The BSP is always a valid target. */ static cpumask_t intr_cpus = (1 << 0); static int current_cpu, num_cpus = 1; static void intr_assign_next_cpu(struct intsrc *isrc) { struct pic *pic; u_int apic_id; /* * Assign this source to a local APIC in a round-robin fashion. */ pic = isrc->is_pic; apic_id = cpu_apic_ids[current_cpu]; pic->pic_assign_cpu(isrc, apic_id); do { current_cpu++; if (current_cpu >= num_cpus) current_cpu = 0; } while (!(intr_cpus & (1 << current_cpu))); } /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. 
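 *
 * A sketch of the expected caller (hypothetical; the real call sites
 * live in the MD CPU enumeration code): each application processor
 * that comes up is advertised as an interrupt target, e.g.
 *
 *	for (cpu = 1; cpu < mp_ncpus; cpu++)
 *		intr_add_cpu(cpu);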
*/ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); intr_cpus |= (1 << cpu); num_cpus++; } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (num_cpus <= 1) return; /* Round-robin assign a CPU to each enabled source. */ sx_xlock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; if (isrc != NULL && isrc->is_handlers > 0) intr_assign_next_cpu(isrc); } sx_xunlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL) #endif Index: head/sys/arm/arm/intr.c =================================================================== --- head/sys/arm/arm/intr.c (revision 170161) +++ head/sys/arm/arm/intr.c (revision 170162) @@ -1,135 +1,144 @@ /* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard. * Copyright (c) 1994-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Soft interrupt and other generic interrupt functions. 
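 *
 * Usage sketch for the registration routine below (the driver, softc
 * and MYDEV_* macros are hypothetical): a filter acknowledges the
 * device and reports whether the interrupt was its own, returning
 * FILTER_STRAY or FILTER_HANDLED as appropriate:
 *
 *	static int
 *	mydev_filter(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (!MYDEV_IRQ_PENDING(sc))
 *			return (FILTER_STRAY);
 *		MYDEV_ACK(sc);
 *		return (FILTER_HANDLED);
 *	}
 *
 *	arm_setup_irqhandler("mydev", mydev_filter, NULL, sc,
 *	    sc->sc_irq, INTR_TYPE_MISC, &sc->sc_cookie);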
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include static struct intr_event *intr_events[NIRQ]; static int intrcnt_tab[NIRQ]; static int intrcnt_index = 0; static int last_printed = 0; void arm_handler_execute(struct trapframe *, int); void arm_setup_irqhandler(const char *name, driver_filter_t *filt, void (*hand)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; if (irq < 0 || irq >= NIRQ) return; event = intr_events[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, (void (*)(void *))arm_unmask_irq, "intr%d:", irq); if (error) return; intr_events[irq] = event; last_printed += snprintf(intrnames + last_printed, MAXCOMLEN + 1, "irq%d: %s", irq, name); last_printed++; intrcnt_tab[irq] = intrcnt_index; intrcnt_index++; } intr_event_add_handler(event, name, filt, hand, arg, intr_priority(flags), flags, cookiep); } int arm_remove_irqhandler(void *cookie) { return (intr_event_remove_handler(cookie)); } void dosoftints(void); void dosoftints(void) { } void arm_handler_execute(struct trapframe *frame, int irqnb) { struct intr_event *event; struct intr_handler *ih; struct thread *td = curthread; - int i, thread; + int i, thread, ret; PCPU_LAZY_INC(cnt.v_intr); td->td_intr_nesting_level++; while ((i = arm_get_next_irq()) != -1) { arm_mask_irq(i); intrcnt[intrcnt_tab[i]]++; event = intr_events[i]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) continue; /* Execute fast handlers. */ + ret = 0; thread = 0; TAILQ_FOREACH(ih, &event->ie_handlers, ih_next) { if (ih->ih_filter == NULL) thread = 1; else - ih->ih_filter(ih->ih_argument ? + ret = ih->ih_filter(ih->ih_argument ? ih->ih_argument : frame); + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } /* Schedule thread if needed. */ if (thread) intr_event_schedule_thread(event); else arm_unmask_irq(i); } td->td_intr_nesting_level--; } Index: head/sys/i386/i386/intr_machdep.c =================================================================== --- head/sys/i386/i386/intr_machdep.c (revision 170161) +++ head/sys/i386/i386/intr_machdep.c (revision 170162) @@ -1,560 +1,579 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for i386. For the i386, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. */ #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct sx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; #ifdef INTR_FILTER static void intr_eoi_src(void *arg); static void intr_disab_eoi_src(void *arg); static void intr_event_stray(void *cookie); #endif #ifdef SMP static int assign_cpu; static void intr_assign_next_cpu(struct intsrc *isrc); #endif static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; STAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. */ int intr_register_pic(struct pic *pic) { int error; sx_xlock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { STAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } sx_xunlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. 
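 *
 * Callers normally run from early PIC setup, before interrupts are
 * enabled; a caller that cannot guarantee that could bracket the
 * call itself (sketch; the interrupt source object is hypothetical):
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	error = intr_register_source(&sc->sc_isrc);
 *	intr_restore(saved);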
*/ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); #ifdef INTR_FILTER error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector); #else error = intr_event_create(&isrc->is_event, isrc, 0, (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector); #endif if (error) return (error); sx_xlock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { sx_xunlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; sx_xunlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { sx_xlock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { #ifdef SMP if (assign_cpu) intr_assign_next_cpu(isrc); #endif isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } sx_xunlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; int error; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { sx_xlock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); sx_xunlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } #ifdef INTR_FILTER void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; int vector; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_LAZY_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; if (intr_event_handle(ie, frame) != 0) intr_event_stray(isrc); } static void intr_event_stray(void *cookie) { struct intsrc *isrc; isrc = cookie; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. 
*/ isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc)); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", isrc->is_pic->pic_vector(isrc)); } static void intr_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_eoi_source(isrc); } static void intr_disab_eoi_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } #else void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct thread *td; struct intr_event *ie; struct intr_handler *ih; - int error, vector, thread; + int error, vector, thread, ret; td = curthread; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_LAZY_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); return; } /* * Execute fast interrupt handlers directly. * To support clock handlers, if a handler registers * with a NULL argument, then we pass it a pointer to * a trapframe as its argument. */ td->td_intr_nesting_level++; + ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument == NULL ? frame : ih->ih_argument, ih->ih_name); if (ih->ih_argument == NULL) - ih->ih_filter(frame); + ret = ih->ih_filter(frame); else - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); + /* + * Wrapper handler special handling: + * + * in some particular cases (like pccard and pccbb), + * the _real_ device handler is wrapped in a couple of + * functions - a filter wrapper and an ithread wrapper. + * In this case (and just in this case), the filter wrapper + * could ask the system to schedule the ithread and mask + * the interrupt source if the wrapped handler is composed + * of just an ithread handler. + * + * TODO: write a generic wrapper to avoid people rolling + * their own + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } /* * If there are any threaded handlers that need to run, * mask the source as well as sending it an EOI. Otherwise, * just send it an EOI but leave it unmasked. */ if (thread) isrc->is_pic->pic_disable_source(isrc, PIC_EOI); else isrc->is_pic->pic_eoi_source(isrc); critical_exit(); /* Schedule the ithread if needed. 
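 *
 * To make the wrapper case above concrete: a pccard/pccbb-style
 * filter wrapper (a sketch with hypothetical names, not code from
 * this change) runs the child's filter when it has one and otherwise
 * asks for the wrapped ithread handler to be scheduled:
 *
 *	static int
 *	wrapper_filter(void *arg)
 *	{
 *		struct wrapped_intr *wi = arg;
 *
 *		if (wi->wi_filter != NULL)
 *			return (wi->wi_filter(wi->wi_arg));
 *		return (FILTER_SCHEDULE_THREAD);
 *	}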
*/ if (thread) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("bad stray interrupt")); } td->td_intr_nesting_level--; } #endif void intr_resume(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic); } sx_xunlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; sx_xlock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } sx_xunlock(&intr_table_lock); } static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; STAILQ_INIT(&pics); sx_init(&intr_table_lock, "intr sources"); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL) #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ /* The BSP is always a valid target. */ static cpumask_t intr_cpus = (1 << 0); static int current_cpu, num_cpus = 1; static void intr_assign_next_cpu(struct intsrc *isrc) { struct pic *pic; u_int apic_id; /* * Assign this source to a local APIC in a round-robin fashion. */ pic = isrc->is_pic; apic_id = cpu_apic_ids[current_cpu]; pic->pic_assign_cpu(isrc, apic_id); do { current_cpu++; if (current_cpu >= num_cpus) current_cpu = 0; } while (!(intr_cpus & (1 << current_cpu))); } /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); intr_cpus |= (1 << cpu); num_cpus++; } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (num_cpus <= 1) return; /* Round-robin assign a CPU to each enabled source. 
*/ sx_xlock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; if (isrc != NULL && isrc->is_handlers > 0) intr_assign_next_cpu(isrc); } sx_xunlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL) #endif Index: head/sys/ia64/ia64/interrupt.c =================================================================== --- head/sys/ia64/ia64/interrupt.c (revision 170161) +++ head/sys/ia64/ia64/interrupt.c (revision 170162) @@ -1,431 +1,440 @@ /* $FreeBSD$ */ /* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */ /*- * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. * All rights reserved. * * Authors: Keith Bostic, Chris G. Demetriou * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /*- * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center. * Redistribute and modify at will, leaving only this additional copyright * notice. */ #include "opt_ddb.h" #include /* RCS ID & Copyright macro defns */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EVCNT_COUNTERS struct evcnt clock_intr_evcnt; /* event counter for clock intrs. 
*/ #else #include #include #endif #ifdef DDB #include #endif #ifdef SMP extern int mp_ipi_test; #endif volatile int mc_expected, mc_received; static void dummy_perf(unsigned long vector, struct trapframe *tf) { printf("performance interrupt!\n"); } void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf; static unsigned int ints[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, ints, CTLFLAG_RW, &ints, sizeof(ints), "IU", ""); static unsigned int clks[MAXCPU]; #ifdef SMP SYSCTL_OPAQUE(_debug, OID_AUTO, clks, CTLFLAG_RW, &clks, sizeof(clks), "IU", ""); #else SYSCTL_INT(_debug, OID_AUTO, clks, CTLFLAG_RW, clks, 0, ""); #endif #ifdef SMP static unsigned int asts[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, asts, CTLFLAG_RW, &asts, sizeof(asts), "IU", ""); static unsigned int rdvs[MAXCPU]; SYSCTL_OPAQUE(_debug, OID_AUTO, rdvs, CTLFLAG_RW, &rdvs, sizeof(rdvs), "IU", ""); #endif SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics"); static int adjust_edges = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_edges, CTLFLAG_RD, &adjust_edges, 0, "Number of times ITC got more than 12.5% behind"); static int adjust_excess = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_excess, CTLFLAG_RD, &adjust_excess, 0, "Total number of ignored ITC interrupts"); static int adjust_lost = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_lost, CTLFLAG_RD, &adjust_lost, 0, "Total number of lost ITC interrupts"); static int adjust_ticks = 0; SYSCTL_INT(_debug_clock, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks, 0, "Total number of ITC interrupts with adjustment"); int interrupt(u_int64_t vector, struct trapframe *tf) { struct thread *td; volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK; uint64_t adj, clk, itc; int64_t delta; int count; ia64_set_fpsr(IA64_FPSR_DEFAULT); td = curthread; atomic_add_int(&td->td_intr_nesting_level, 1); /* * Handle ExtINT interrupts by generating an INTA cycle to * read the vector. 
*/ if (vector == 0) { vector = ib->ib_inta; printf("ExtINT interrupt: vector=%ld\n", vector); if (vector == 15) goto stray; } if (vector == CLOCK_VECTOR) {/* clock interrupt */ /* CTR0(KTR_INTR, "clock interrupt"); */ PCPU_LAZY_INC(cnt.v_intr); #ifdef EVCNT_COUNTERS clock_intr_evcnt.ev_count++; #else intrcnt[INTRCNT_CLOCK]++; #endif clks[PCPU_GET(cpuid)]++; critical_enter(); adj = PCPU_GET(clockadj); itc = ia64_get_itc(); ia64_set_itm(itc + ia64_clock_reload - adj); clk = PCPU_GET(clock); delta = itc - clk; count = 0; while (delta >= ia64_clock_reload) { /* Only the BSP runs the real clock */ if (PCPU_GET(cpuid) == 0) hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf)); else hardclock_cpu(TRAPF_USERMODE(tf)); if (profprocs != 0) profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf)); statclock(TRAPF_USERMODE(tf)); delta -= ia64_clock_reload; clk += ia64_clock_reload; if (adj != 0) adjust_ticks++; count++; } if (count > 0) { adjust_lost += count - 1; if (delta > (ia64_clock_reload >> 3)) { if (adj == 0) adjust_edges++; adj = ia64_clock_reload >> 4; } else adj = 0; } else { adj = 0; adjust_excess++; } PCPU_SET(clock, clk); PCPU_SET(clockadj, adj); critical_exit(); #ifdef SMP } else if (vector == ipi_vector[IPI_AST]) { asts[PCPU_GET(cpuid)]++; CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid)); } else if (vector == ipi_vector[IPI_HIGH_FP]) { struct thread *thr = PCPU_GET(fpcurthread); if (thr != NULL) { mtx_lock_spin(&thr->td_md.md_highfp_mtx); save_high_fp(&thr->td_pcb->pcb_high_fp); thr->td_pcb->pcb_fpcpu = NULL; PCPU_SET(fpcurthread, NULL); mtx_unlock_spin(&thr->td_md.md_highfp_mtx); } } else if (vector == ipi_vector[IPI_RENDEZVOUS]) { rdvs[PCPU_GET(cpuid)]++; CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid)); smp_rendezvous_action(); } else if (vector == ipi_vector[IPI_STOP]) { register_t intr; cpumask_t mybit = PCPU_GET(cpumask); intr = intr_disable(); savectx(PCPU_PTR(pcb)); atomic_set_int(&stopped_cpus, mybit); while ((started_cpus & mybit) == 0) /* spin */; atomic_clear_int(&started_cpus, mybit); atomic_clear_int(&stopped_cpus, mybit); intr_restore(intr); } else if (vector == ipi_vector[IPI_TEST]) { CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid)); mp_ipi_test++; #endif } else { ints[PCPU_GET(cpuid)]++; ia64_dispatch_intr(tf, vector); } stray: atomic_subtract_int(&td->td_intr_nesting_level, 1); return (TRAPF_USERMODE(tf)); } /* * Hardware irqs have vectors starting at this offset. */ #define IA64_HARDWARE_IRQ_BASE 0x20 struct ia64_intr { struct intr_event *event; /* interrupt event */ volatile long *cntp; /* interrupt counter */ }; static struct mtx ia64_intrs_lock; static struct ia64_intr *ia64_intrs[256]; extern struct sapic *ia64_sapics[]; extern int ia64_sapic_count; static void ithds_init(void *dummy) { mtx_init(&ia64_intrs_lock, "intr table", NULL, MTX_SPIN); } SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL); static void ia64_send_eoi(uintptr_t vector) { int irq, i; irq = vector - IA64_HARDWARE_IRQ_BASE; for (i = 0; i < ia64_sapic_count; i++) { struct sapic *sa = ia64_sapics[i]; if (irq >= sa->sa_base && irq <= sa->sa_limit) sapic_eoi(sa, vector); } } int ia64_setup_intr(const char *name, int irq, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep, volatile long *cntp) { struct ia64_intr *i; int errcode; intptr_t vector = irq + IA64_HARDWARE_IRQ_BASE; char *intrname; /* * XXX - Can we have more than one device on a vector? 
If so, we have * a race condition here that needs to be worked around similar to * the fashion done in the i386 inthand_add() function. */ /* First, check for an existing hash table entry for this vector. */ mtx_lock_spin(&ia64_intrs_lock); i = ia64_intrs[vector]; mtx_unlock_spin(&ia64_intrs_lock); if (i == NULL) { /* None was found, so create an entry. */ i = malloc(sizeof(struct ia64_intr), M_DEVBUF, M_NOWAIT); if (i == NULL) return ENOMEM; if (cntp == NULL) i->cntp = intrcnt + irq + INTRCNT_ISA_IRQ; else i->cntp = cntp; if (name != NULL && *name != '\0') { /* XXX needs abstraction. Too error prone. */ intrname = intrnames + (irq + INTRCNT_ISA_IRQ) * INTRNAME_LEN; memset(intrname, ' ', INTRNAME_LEN - 1); bcopy(name, intrname, strlen(name)); } errcode = intr_event_create(&i->event, (void *)vector, 0, (void (*)(void *))ia64_send_eoi, "intr:"); if (errcode) { free(i, M_DEVBUF); return errcode; } mtx_lock_spin(&ia64_intrs_lock); ia64_intrs[vector] = i; mtx_unlock_spin(&ia64_intrs_lock); } /* Second, add this handler. */ errcode = intr_event_add_handler(i->event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (errcode) return errcode; return (sapic_enable(irq, vector)); } int ia64_teardown_intr(void *cookie) { return (intr_event_remove_handler(cookie)); } void ia64_dispatch_intr(void *frame, unsigned long vector) { struct ia64_intr *i; struct intr_event *ie; /* our interrupt event */ struct intr_handler *ih; - int error, thread; + int error, thread, ret; /* * Find the interrupt thread for this vector. */ i = ia64_intrs[vector]; if (i == NULL) return; /* no event for this vector */ if (i->cntp) atomic_add_long(i->cntp, 1); ie = i->event; KASSERT(ie != NULL, ("interrupt vector without an event")); /* * As an optimization, if an event has no handlers, don't * schedule it to run. */ if (TAILQ_EMPTY(&ie->ie_handlers)) return; /* * Execute all fast interrupt handlers directly without Giant. Note * that this means that any fast interrupt handler must be MP safe. */ + ret = 0; thread = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument, ih->ih_name); - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } critical_exit(); if (thread) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("got an impossible stray interrupt")); } else ia64_send_eoi(vector); } #ifdef DDB static void db_show_vector(int vector) { int irq, i; irq = vector - IA64_HARDWARE_IRQ_BASE; for (i = 0; i < ia64_sapic_count; i++) { struct sapic *sa = ia64_sapics[i]; if (irq >= sa->sa_base && irq <= sa->sa_limit) sapic_print(sa, irq - sa->sa_base); } } DB_SHOW_COMMAND(irq, db_show_irq) { int vector; if (have_addr) { vector = ((addr >> 4) % 16) * 10 + (addr % 16); db_show_vector(vector); } else { for (vector = IA64_HARDWARE_IRQ_BASE; vector < IA64_HARDWARE_IRQ_BASE + 64; vector++) db_show_vector(vector); } } #endif Index: head/sys/kern/kern_intr.c =================================================================== --- head/sys/kern/kern_intr.c (revision 170161) +++ head/sys/kern/kern_intr.c (revision 170162) @@ -1,1570 +1,1567 @@ /*- * Copyright (c) 1997, Stefan Esser * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #include #endif /* * Describe an interrupt thread. There is one of these per interrupt event. */ struct intr_thread { struct intr_event *it_event; struct thread *it_thread; /* Kernel thread. */ int it_flags; /* (j) IT_* flags. */ int it_need; /* Needs service. */ }; /* Interrupt thread flags kept in it_flags */ #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ struct intr_entropy { struct thread *td; uintptr_t event; }; struct intr_event *clk_intr_event; struct intr_event *tty_intr_event; void *softclock_ih; void *vm_ih; static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); static int intr_storm_threshold = 1000; TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, &intr_storm_threshold, 0, "Number of consecutive interrupts before storm protection is enabled"); static TAILQ_HEAD(, intr_event) event_list = TAILQ_HEAD_INITIALIZER(event_list); static void intr_event_update(struct intr_event *ie); #ifdef INTR_FILTER static struct intr_thread *ithread_create(const char *name, struct intr_handler *ih); #else static struct intr_thread *ithread_create(const char *name); #endif static void ithread_destroy(struct intr_thread *ithread); static void ithread_execute_handlers(struct proc *p, struct intr_event *ie); #ifdef INTR_FILTER static void priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih); #endif static void ithread_loop(void *); static void ithread_update(struct intr_thread *ithd); static void start_softintr(void *); /* Map an interrupt type to an ithread priority. */ u_char intr_priority(enum intr_type flags) { u_char pri; flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); switch (flags) { case INTR_TYPE_TTY: pri = PI_TTYLOW; break; case INTR_TYPE_BIO: /* * XXX We need to refine this. BSD/OS distinguishes * between tape and disk priorities. 
*/ pri = PI_DISK; break; case INTR_TYPE_NET: pri = PI_NET; break; case INTR_TYPE_CAM: pri = PI_DISK; /* XXX or PI_CAM? */ break; case INTR_TYPE_AV: /* Audio/video */ pri = PI_AV; break; case INTR_TYPE_CLK: pri = PI_REALTIME; break; case INTR_TYPE_MISC: pri = PI_DULL; /* don't care */ break; default: /* We didn't specify an interrupt level. */ panic("intr_priority: no interrupt type in flags"); } return pri; } /* * Update an ithread based on the associated intr_event. */ static void ithread_update(struct intr_thread *ithd) { struct intr_event *ie; struct thread *td; u_char pri; ie = ithd->it_event; td = ithd->it_thread; /* Determine the overall priority of this event. */ if (TAILQ_EMPTY(&ie->ie_handlers)) pri = PRI_MAX_ITHD; else pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; /* Update name and priority. */ strlcpy(td->td_proc->p_comm, ie->ie_fullname, sizeof(td->td_proc->p_comm)); mtx_lock_spin(&sched_lock); sched_prio(td, pri); mtx_unlock_spin(&sched_lock); } /* * Regenerate the full name of an interrupt event and update its priority. */ static void intr_event_update(struct intr_event *ie) { struct intr_handler *ih; char *last; int missed, space; /* Start off with no entropy and just the name of the event. */ mtx_assert(&ie->ie_lock, MA_OWNED); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); ie->ie_flags &= ~IE_ENTROPY; missed = 0; space = 1; /* Run through all the handlers updating values. */ TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < sizeof(ie->ie_fullname)) { strcat(ie->ie_fullname, " "); strcat(ie->ie_fullname, ih->ih_name); space = 0; } else missed++; if (ih->ih_flags & IH_ENTROPY) ie->ie_flags |= IE_ENTROPY; } /* * If the handler names were too long, add +'s to indicate missing * names. If we run out of room and still have +'s to add, change * the last character from a + to a *. */ last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; while (missed-- > 0) { if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { if (*last == '+') { *last = '*'; break; } else *last = '+'; } else if (space) { strcat(ie->ie_fullname, " +"); space = 0; } else strcat(ie->ie_fullname, "+"); } /* * If this event has an ithread, update its priority and * name. */ if (ie->ie_thread != NULL) ithread_update(ie->ie_thread); CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); } #ifndef INTR_FILTER int intr_event_create(struct intr_event **event, void *source, int flags, void (*enable)(void *), const char *fmt, ...) { struct intr_event *ie; va_list ap; /* The only valid flag during creation is IE_SOFT. */ if ((flags & ~IE_SOFT) != 0) return (EINVAL); ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); ie->ie_source = source; ie->ie_enable = enable; ie->ie_flags = flags; TAILQ_INIT(&ie->ie_handlers); mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); va_start(ap, fmt); vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); va_end(ap); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_INSERT_TAIL(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); if (event != NULL) *event = ie; CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); return (0); } #else int intr_event_create(struct intr_event **event, void *source, int flags, void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *), const char *fmt, ...) { struct intr_event *ie; va_list ap; /* The only valid flag during creation is IE_SOFT.
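 *
 * IE_SOFT is normally supplied indirectly, by creating the event
 * through swi_add() later in this file; e.g. (sketch, hypothetical
 * handler and cookie names):
 *
 *	error = swi_add(NULL, "my_swi", my_swi_handler, NULL, SWI_TQ,
 *	    0, &my_swi_cookie);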
*/ if ((flags & ~IE_SOFT) != 0) return (EINVAL); ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); ie->ie_source = source; ie->ie_enable = enable; ie->ie_eoi = eoi; ie->ie_disab = disab; ie->ie_flags = flags; TAILQ_INIT(&ie->ie_handlers); mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); va_start(ap, fmt); vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); va_end(ap); strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_INSERT_TAIL(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); if (event != NULL) *event = ie; CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); return (0); } #endif int intr_event_destroy(struct intr_event *ie) { mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { mtx_unlock(&ie->ie_lock); return (EBUSY); } mtx_pool_lock(mtxpool_sleep, &event_list); TAILQ_REMOVE(&event_list, ie, ie_list); mtx_pool_unlock(mtxpool_sleep, &event_list); #ifndef notyet if (ie->ie_thread != NULL) { ithread_destroy(ie->ie_thread); ie->ie_thread = NULL; } #endif mtx_unlock(&ie->ie_lock); mtx_destroy(&ie->ie_lock); free(ie, M_ITHREAD); return (0); } #ifndef INTR_FILTER static struct intr_thread * ithread_create(const char *name) { struct intr_thread *ithd; struct thread *td; struct proc *p; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID, 0, "%s", name); if (error) panic("kthread_create() failed with %d", error); td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ mtx_lock_spin(&sched_lock); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); mtx_unlock_spin(&sched_lock); td->td_pflags |= TDP_ITHREAD; ithd->it_thread = td; CTR2(KTR_INTR, "%s: created %s", __func__, name); return (ithd); } #else static struct intr_thread * ithread_create(const char *name, struct intr_handler *ih) { struct intr_thread *ithd; struct thread *td; struct proc *p; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); error = kthread_create(ithread_loop, ih, &p, RFSTOPPED | RFHIGHPID, 0, "%s", name); if (error) panic("kthread_create() failed with %d", error); td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ mtx_lock_spin(&sched_lock); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); mtx_unlock_spin(&sched_lock); td->td_pflags |= TDP_ITHREAD; ithd->it_thread = td; CTR2(KTR_INTR, "%s: created %s", __func__, name); return (ithd); } #endif static void ithread_destroy(struct intr_thread *ithread) { struct thread *td; CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); td = ithread->it_thread; mtx_lock_spin(&sched_lock); ithread->it_flags |= IT_DEAD; if (TD_AWAITING_INTR(td)) { TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } mtx_unlock_spin(&sched_lock); } #ifndef INTR_FILTER int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep) { struct intr_handler *ih, *temp_ih; struct intr_thread *it; if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) return (EINVAL); /* Allocate and populate an interrupt handler structure. 
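 *
 * Drivers reach this function through bus_setup_intr(); a typical
 * registration supplying both a filter and an ithread handler looks
 * like this (sketch; the device, softc and handler names are
 * hypothetical):
 *
 *	error = bus_setup_intr(dev, sc->sc_irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, mydev_filter, mydev_intr,
 *	    sc, &sc->sc_intrhand);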
*/ ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); ih->ih_filter = filter; ih->ih_handler = handler; ih->ih_argument = arg; ih->ih_name = name; ih->ih_event = ie; ih->ih_pri = pri; if (flags & INTR_EXCL) ih->ih_flags = IH_EXCLUSIVE; if (flags & INTR_MPSAFE) ih->ih_flags |= IH_MPSAFE; if (flags & INTR_ENTROPY) ih->ih_flags |= IH_ENTROPY; /* We can only have one exclusive handler in an event. */ mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { if ((flags & INTR_EXCL) || (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { mtx_unlock(&ie->ie_lock); free(ih, M_ITHREAD); return (EINVAL); } } /* Add the new handler to the event in priority order. */ TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { if (temp_ih->ih_pri > ih->ih_pri) break; } if (temp_ih == NULL) TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); else TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); intr_event_update(ie); /* Create a thread if we need one. */ while (ie->ie_thread == NULL && handler != NULL) { if (ie->ie_flags & IE_ADDING_THREAD) msleep(ie, &ie->ie_lock, 0, "ithread", 0); else { ie->ie_flags |= IE_ADDING_THREAD; mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn"); mtx_lock(&ie->ie_lock); ie->ie_flags &= ~IE_ADDING_THREAD; ie->ie_thread = it; it->it_event = ie; ithread_update(it); wakeup(ie); } } CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, ie->ie_name); mtx_unlock(&ie->ie_lock); if (cookiep != NULL) *cookiep = ih; return (0); } #else int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep) { struct intr_handler *ih, *temp_ih; struct intr_thread *it; if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) return (EINVAL); /* Allocate and populate an interrupt handler structure. */ ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); ih->ih_filter = filter; ih->ih_handler = handler; ih->ih_argument = arg; ih->ih_name = name; ih->ih_event = ie; ih->ih_pri = pri; if (flags & INTR_EXCL) ih->ih_flags = IH_EXCLUSIVE; if (flags & INTR_MPSAFE) ih->ih_flags |= IH_MPSAFE; if (flags & INTR_ENTROPY) ih->ih_flags |= IH_ENTROPY; /* We can only have one exclusive handler in an event. */ mtx_lock(&ie->ie_lock); if (!TAILQ_EMPTY(&ie->ie_handlers)) { if ((flags & INTR_EXCL) || (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { mtx_unlock(&ie->ie_lock); free(ih, M_ITHREAD); return (EINVAL); } } /* Add the new handler to the event in priority order. */ TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { if (temp_ih->ih_pri > ih->ih_pri) break; } if (temp_ih == NULL) TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); else TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); intr_event_update(ie); /* For filtered handlers, create a private ithread to run on. */ if (filter != NULL && handler != NULL) { mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn", ih); mtx_lock(&ie->ie_lock); it->it_event = ie; ih->ih_thread = it; ithread_update(it); // XXX - do we really need this?!?!? } else { /* Create the global per-event thread if we need one.
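 *
 * To summarize the INTR_FILTER ithread policy above: a registration
 * carrying both a filter and a handler got a private ithread a few
 * lines up, while a handler-only registration falls through to here
 * and shares the per-event thread. Illustrative calls (hypothetical
 * names):
 *
 *	intr_event_add_handler(ie, "private", my_filter, my_handler,
 *	    sc, PI_NET, 0, &cookie1);
 *	intr_event_add_handler(ie, "shared", NULL, my_handler,
 *	    sc, PI_NET, 0, &cookie2);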
*/ while (ie->ie_thread == NULL && handler != NULL) { if (ie->ie_flags & IE_ADDING_THREAD) msleep(ie, &ie->ie_lock, 0, "ithread", 0); else { ie->ie_flags |= IE_ADDING_THREAD; mtx_unlock(&ie->ie_lock); it = ithread_create("intr: newborn", ih); mtx_lock(&ie->ie_lock); ie->ie_flags &= ~IE_ADDING_THREAD; ie->ie_thread = it; it->it_event = ie; ithread_update(it); wakeup(ie); } } } CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, ie->ie_name); mtx_unlock(&ie->ie_lock); if (cookiep != NULL) *cookiep = ih; return (0); } #endif /* * Return the ie_source field from the intr_event an intr_handler is * associated with. */ void * intr_handler_source(void *cookie) { struct intr_handler *ih; struct intr_event *ie; ih = (struct intr_handler *)cookie; if (ih == NULL) return (NULL); ie = ih->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", ih->ih_name)); return (ie->ie_source); } #ifndef INTR_FILTER int intr_event_remove_handler(void *cookie) { struct intr_handler *handler = (struct intr_handler *)cookie; struct intr_event *ie; #ifdef INVARIANTS struct intr_handler *ih; #endif #ifdef notyet int dead; #endif if (handler == NULL) return (EINVAL); ie = handler->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", handler->ih_name)); mtx_lock(&ie->ie_lock); CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, ie->ie_name); #ifdef INVARIANTS TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) if (ih == handler) goto ok; mtx_unlock(&ie->ie_lock); panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", ih->ih_name, ie->ie_name); ok: #endif /* * If there is no ithread, then just remove the handler and return. * XXX: Note that an INTR_FAST handler might be running on another * CPU! */ if (ie->ie_thread == NULL) { TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } /* * If the interrupt thread is already running, then just mark this * handler as being dead and let the ithread do the actual removal. * * During a cold boot while cold is set, msleep() does not sleep, * so we have to remove the handler here rather than letting the * thread do it. */ mtx_lock_spin(&sched_lock); if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { handler->ih_flags |= IH_DEAD; /* * Ensure that the thread will process the handler list * again and remove this handler if it has already passed * it on the list. */ ie->ie_thread->it_need = 1; } else TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock_spin(&sched_lock); while (handler->ih_flags & IH_DEAD) msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); intr_event_update(ie); #ifdef notyet /* * XXX: This could be bad in the case of ppbus(8). Also, I think * this could lead to races of stale data when servicing an * interrupt. */ dead = 1; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (!(ih->ih_flags & IH_FAST)) { dead = 0; break; } } if (dead) { ithread_destroy(ie->ie_thread); ie->ie_thread = NULL; } #endif mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } int intr_event_schedule_thread(struct intr_event *ie) { struct intr_entropy entropy; struct intr_thread *it; struct thread *td; struct thread *ctd; struct proc *p; /* * If no ithread or no handlers, then we have a stray interrupt. 
*/ if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || ie->ie_thread == NULL) return (EINVAL); ctd = curthread; it = ie->ie_thread; td = it->it_thread; p = td->td_proc; /* * If any of the handlers for this ithread claim to be good * sources of entropy, then gather some. */ if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, p->p_pid, p->p_comm); entropy.event = (uintptr_t)ie; entropy.td = ctd; random_harvest(&entropy, sizeof(entropy), 2, 0, RANDOM_INTERRUPT); } KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); /* * Set it_need to tell the thread to keep running if it is already * running. Then, grab sched_lock and see if we actually need to * put this thread on the runqueue. */ it->it_need = 1; mtx_lock_spin(&sched_lock); if (TD_AWAITING_INTR(td)) { CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, p->p_comm); TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); } mtx_unlock_spin(&sched_lock); return (0); } #else int intr_event_remove_handler(void *cookie) { struct intr_handler *handler = (struct intr_handler *)cookie; struct intr_event *ie; struct intr_thread *it; #ifdef INVARIANTS struct intr_handler *ih; #endif #ifdef notyet int dead; #endif if (handler == NULL) return (EINVAL); ie = handler->ih_event; KASSERT(ie != NULL, ("interrupt handler \"%s\" has a NULL interrupt event", handler->ih_name)); mtx_lock(&ie->ie_lock); CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, ie->ie_name); #ifdef INVARIANTS TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) if (ih == handler) goto ok; mtx_unlock(&ie->ie_lock); panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", ih->ih_name, ie->ie_name); ok: #endif /* * If there are no ithreads (per event and per handler), then * just remove the handler and return. * XXX: Note that an INTR_FAST handler might be running on another CPU! */ if (ie->ie_thread == NULL && handler->ih_thread == NULL) { TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock(&ie->ie_lock); free(handler, M_ITHREAD); return (0); } /* Private or global ithread? */ it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; /* * If the interrupt thread is already running, then just mark this * handler as being dead and let the ithread do the actual removal. * * During a cold boot while cold is set, msleep() does not sleep, * so we have to remove the handler here rather than letting the * thread do it. */ mtx_lock_spin(&sched_lock); if (!TD_AWAITING_INTR(it->it_thread) && !cold) { handler->ih_flags |= IH_DEAD; /* * Ensure that the thread will process the handler list * again and remove this handler if it has already passed * it on the list. */ it->it_need = 1; } else TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); mtx_unlock_spin(&sched_lock); while (handler->ih_flags & IH_DEAD) msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); /* * At this point, the handler has been disconnected from the event, * so we can kill the private ithread if any. */ if (handler->ih_thread) { ithread_destroy(handler->ih_thread); handler->ih_thread = NULL; } intr_event_update(ie); #ifdef notyet /* * XXX: This could be bad in the case of ppbus(8). Also, I think * this could lead to races of stale data when servicing an * interrupt. 
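The it_need/TD_AWAITING_INTR pairing in intr_event_schedule_thread() above avoids lost wakeups because the ithread only commits to sleeping while holding sched_lock, and the scheduler re-checks TD_AWAITING_INTR under that same lock. A compact pthread rendition of the pattern; the kernel sets it_need before taking the lock, while this sketch folds both under one mutex, and all names are stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* sched_lock */
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int need;			/* plays it_need */
static int awaiting;			/* plays TD_AWAITING_INTR() */

static void
schedule(void)				/* plays intr_event_schedule_thread() */
{
	pthread_mutex_lock(&lock);
	need = 1;			/* request another pass */
	if (awaiting)			/* only wake a sleeping thread */
		pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

static void *
ithread(void *arg)			/* one pass of ithread_loop() */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!need) {		/* re-checked under the lock: no lost wakeup */
		awaiting = 1;
		pthread_cond_wait(&wake, &lock);
		awaiting = 0;
	}
	need = 0;
	pthread_mutex_unlock(&lock);
	puts("run handlers");
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, ithread, NULL);
	schedule();
	pthread_join(td, NULL);
	return (0);
}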
 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL && ih->ih_thread == NULL) {
			/* This handler still runs on the shared ithread. */
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, p->p_comm);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.
	 */
	it->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    p->p_comm);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
#ifdef INTR_FILTER
		error = intr_event_create(&ie, NULL, IE_SOFT, NULL, NULL, NULL,
		    "swi%d:", pri);
#else
		error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
		    "swi%d:", pri);
#endif
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	return (intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
	/* XXX (KSE): think of a better way to get separate queues */
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_LAZY_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
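Before the removal path, the registration side is worth seeing in one place: swi_add() and swi_sched() above are the entire client API for software interrupts. A hypothetical subsystem would use them roughly as below. This is a kernel-context sketch, not a complete driver: the foo_* names are invented, and SWI_TQ is assumed to be an appropriate <sys/interrupt.h> priority (it is the one taskqueue(9) uses in this era):

static struct intr_event *foo_event;
static void *foo_cookie;

static void
foo_swi(void *arg)			/* runs in the swi ithread */
{
	/* deferred work goes here */
}

static void
foo_init(void)
{
	if (swi_add(&foo_event, "foo", foo_swi, NULL, SWI_TQ,
	    INTR_MPSAFE, &foo_cookie) != 0)
		panic("foo: cannot create swi");
}

static void
foo_kick(void)				/* from a filter or other hot path */
{
	swi_sched(foo_cookie, 0);	/* SWI_DELAY would set ih_need only */
}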
*/ int swi_remove(void *cookie) { return (intr_event_remove_handler(cookie)); } #ifdef INTR_FILTER static void priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) { struct intr_event *ie; ie = ih->ih_event; /* * If this handler is marked for death, remove it from * the list of handlers and wake up the sleeper. */ if (ih->ih_flags & IH_DEAD) { mtx_lock(&ie->ie_lock); TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); ih->ih_flags &= ~IH_DEAD; wakeup(ih); mtx_unlock(&ie->ie_lock); return; } /* Execute this handler. */ CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, ih->ih_name, ih->ih_flags); if (!(ih->ih_flags & IH_MPSAFE)) mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if (!(ih->ih_flags & IH_MPSAFE)) mtx_unlock(&Giant); } #endif static void ithread_execute_handlers(struct proc *p, struct intr_event *ie) { struct intr_handler *ih, *ihn; /* Interrupt handlers should not sleep. */ if (!(ie->ie_flags & IE_SOFT)) THREAD_NO_SLEEPING(); TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { /* * If this handler is marked for death, remove it from * the list of handlers and wake up the sleeper. */ if (ih->ih_flags & IH_DEAD) { mtx_lock(&ie->ie_lock); TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); ih->ih_flags &= ~IH_DEAD; wakeup(ih); mtx_unlock(&ie->ie_lock); continue; } /* Skip filter only handlers */ if (ih->ih_handler == NULL) continue; /* * For software interrupt threads, we only execute * handlers that have their need flag set. Hardware * interrupt threads always invoke all of their handlers. */ if (ie->ie_flags & IE_SOFT) { if (!ih->ih_need) continue; else atomic_store_rel_int(&ih->ih_need, 0); } /* Execute this handler. */ CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, ih->ih_name, ih->ih_flags); if (!(ih->ih_flags & IH_MPSAFE)) mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if (!(ih->ih_flags & IH_MPSAFE)) mtx_unlock(&Giant); } if (!(ie->ie_flags & IE_SOFT)) THREAD_SLEEPING_OK(); /* * Interrupt storm handling: * * If this interrupt source is currently storming, then throttle * it to only fire the handler once per clock tick. * * If this interrupt source is not currently storming, but the * number of back to back interrupts exceeds the storm threshold, * then enter storming mode. */ if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && !(ie->ie_flags & IE_SOFT)) { /* Report the message only once every second. */ if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { printf( "interrupt storm detected on \"%s\"; throttling interrupt source\n", ie->ie_name); } pause("istorm", 1); } else ie->ie_count++; /* * Now that all the handlers have had a chance to run, reenable * the interrupt source. */ if (ie->ie_enable != NULL) ie->ie_enable(ie->ie_source); } #ifndef INTR_FILTER /* * This is the main code for interrupt threads. */ static void ithread_loop(void *arg) { struct intr_thread *ithd; struct intr_event *ie; struct thread *td; struct proc *p; td = curthread; p = td->td_proc; ithd = (struct intr_thread *)arg; KASSERT(ithd->it_thread == td, ("%s: ithread and proc linkage out of sync", __func__)); ie = ithd->it_event; ie->ie_count = 0; /* * As long as we have interrupts outstanding, go through the * list of handlers, giving each one a go at it. */ for (;;) { /* * If we are an orphaned thread, then just die. 
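The service loop that follows clears it_need with atomic_store_rel_int() before walking the handlers, and its comment questions whether release semantics suffice: a release store orders only earlier accesses before the store, not the handler's device accesses that come after it. One way to express the stronger ordering in C11 terms, as a model of the concern rather than kernel code (a sequentially consistent read-modify-write gives full ordering on the clear):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int it_need;
static volatile int device_status = 1;	/* stands in for a device register */

static void
ithread_pass(void)
{
	/*
	 * Clearing the flag with a seq_cst RMW makes the clear visible
	 * before the handler's device accesses below; a plain release
	 * store would only order *earlier* accesses before it.
	 */
	while (atomic_exchange_explicit(&it_need, 0, memory_order_seq_cst)) {
		if (device_status)
			puts("service device");	/* handler body */
	}
}

int
main(void)
{
	atomic_store(&it_need, 1);	/* an interrupt arrived */
	ithread_pass();
	return (0);
}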
*/ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, p->p_pid, p->p_comm); free(ithd, M_ITHREAD); kthread_exit(0); } /* * Service interrupts. If another interrupt arrives while * we are running, it will set it_need to note that we * should make another pass. */ while (ithd->it_need) { /* * This might need a full read and write barrier * to make sure that this write posts before any * of the memory or device accesses in the * handlers. */ atomic_store_rel_int(&ithd->it_need, 0); ithread_execute_handlers(p, ie); } WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); mtx_assert(&Giant, MA_NOTOWNED); /* * Processed all our interrupts. Now get the sched * lock. This may take a while and it_need may get * set again, so we have to check it again. */ mtx_lock_spin(&sched_lock); if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { TD_SET_IWAIT(td); ie->ie_count = 0; mi_switch(SW_VOL, NULL); } mtx_unlock_spin(&sched_lock); } } #else /* * This is the main code for interrupt threads. */ static void ithread_loop(void *arg) { struct intr_thread *ithd; struct intr_handler *ih; struct intr_event *ie; struct thread *td; struct proc *p; int priv; td = curthread; p = td->td_proc; ih = (struct intr_handler *)arg; priv = (ih->ih_thread != NULL) ? 1 : 0; ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; KASSERT(ithd->it_thread == td, ("%s: ithread and proc linkage out of sync", __func__)); ie = ithd->it_event; ie->ie_count = 0; /* * As long as we have interrupts outstanding, go through the * list of handlers, giving each one a go at it. */ for (;;) { /* * If we are an orphaned thread, then just die. */ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, p->p_pid, p->p_comm); free(ithd, M_ITHREAD); kthread_exit(0); } /* * Service interrupts. If another interrupt arrives while * we are running, it will set it_need to note that we * should make another pass. */ while (ithd->it_need) { /* * This might need a full read and write barrier * to make sure that this write posts before any * of the memory or device accesses in the * handlers. */ atomic_store_rel_int(&ithd->it_need, 0); if (priv) priv_ithread_execute_handler(p, ih); else ithread_execute_handlers(p, ie); } WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); mtx_assert(&Giant, MA_NOTOWNED); /* * Processed all our interrupts. Now get the sched * lock. This may take a while and it_need may get * set again, so we have to check it again. */ mtx_lock_spin(&sched_lock); if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { TD_SET_IWAIT(td); ie->ie_count = 0; mi_switch(SW_VOL, NULL); } mtx_unlock_spin(&sched_lock); } } /* * Main loop for interrupt filter. * * Some architectures (i386, amd64 and arm) require the optional frame * parameter, and use it as the main argument for fast handler execution * when ih_argument == NULL. * * Return value: * o FILTER_STRAY: No filter recognized the event, and no * filter-less handler is registered on this * line. * o FILTER_HANDLED: A filter claimed the event and served it. * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at * least one filter-less handler on this line. * o FILTER_HANDLED | * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for * scheduling the per-handler ithread. * * In case an ithread has to be scheduled, in *ithd there will be a * pointer to a struct intr_thread containing the thread to be * scheduled. 
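Under that contract, a driver filter typically looks like the sketch below: FILTER_STRAY when the device did not interrupt, FILTER_HANDLED when the filter did all the work itself, and FILTER_HANDLED | FILTER_SCHEDULE_THREAD to punt the heavy lifting to the handler's private ithread. Everything prefixed foo_/FOO_ here is hypothetical; only the FILTER_* constants are real:

static int
foo_filter(void *arg)
{
	struct foo_softc *sc = arg;

	if (!FOO_INTR_PENDING(sc))	/* not our device */
		return (FILTER_STRAY);
	FOO_ACK_INTR(sc);		/* quiesce the source */
	if (FOO_NEEDS_THREAD(sc))	/* heavy work: defer to the ithread */
		return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
	return (FILTER_HANDLED);	/* fully served in filter context */
}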
 */
int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}
-		KASSERT(ret != FILTER_SCHEDULE_THREAD,
-		    ("intr_filter_loop: FILTER_SCHEDULE_THREAD from filter"));
-
		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:    the event connected to this interrupt.
 * o frame: some archs (e.g., i386) pass a frame to some
 *          handlers as their main argument.
 * Return value:
 * o 0:      everything ok.
 * o EINVAL: stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);

	/*
	 * If the interrupt was fully served, send it an EOI but leave
	 * it unmasked.  Otherwise, mask the source as well as sending
	 * it an EOI.
	 */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_eoi != NULL)
			ie->ie_eoi(ie->ie_source);
	} else {
		if (ie->ie_disab != NULL)
			ie->ie_disab(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed.
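The ie_count bump here feeds the same throttle as the storm branch in ithread_execute_handlers() above: once ie_count reaches intr_storm_threshold, the ithread stops counting and sleeps a tick per pass, and ie_count is zeroed in ithread_loop() when the thread goes idle, so only genuinely back-to-back interrupts accumulate. A standalone model of the arithmetic; the threshold of 1000 is an assumption about the hw.intr_storm_threshold tunable's default in this era:

#include <stdio.h>

int
main(void)
{
	int intr_storm_threshold = 1000;	/* assumed default */
	int ie_count = 0, fired, throttled = 0;

	for (fired = 0; fired < 1500; fired++) {
		if (intr_storm_threshold != 0 &&
		    ie_count >= intr_storm_threshold)
			throttled++;	/* kernel would pause("istorm", 1) */
		else
			ie_count++;	/* one more back-to-back interrupt */
	}
	printf("%d of %d passes throttled\n", throttled, fired);
	return (0);
}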
*/ if (thread & FILTER_SCHEDULE_THREAD) { if (intr_event_schedule_thread(ie, ithd) != 0) panic("%s: impossible stray interrupt", __func__); } td->td_intr_nesting_level--; return (0); } #endif #ifdef DDB /* * Dump details about an interrupt handler */ static void db_dump_intrhand(struct intr_handler *ih) { int comma; db_printf("\t%-10s ", ih->ih_name); switch (ih->ih_pri) { case PI_REALTIME: db_printf("CLK "); break; case PI_AV: db_printf("AV "); break; case PI_TTYHIGH: case PI_TTYLOW: db_printf("TTY "); break; case PI_TAPE: db_printf("TAPE"); break; case PI_NET: db_printf("NET "); break; case PI_DISK: case PI_DISKLOW: db_printf("DISK"); break; case PI_DULL: db_printf("DULL"); break; default: if (ih->ih_pri >= PI_SOFT) db_printf("SWI "); else db_printf("%4u", ih->ih_pri); break; } db_printf(" "); db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); db_printf("(%p)", ih->ih_argument); if (ih->ih_need || (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | IH_MPSAFE)) != 0) { db_printf(" {"); comma = 0; if (ih->ih_flags & IH_EXCLUSIVE) { if (comma) db_printf(", "); db_printf("EXCL"); comma = 1; } if (ih->ih_flags & IH_ENTROPY) { if (comma) db_printf(", "); db_printf("ENTROPY"); comma = 1; } if (ih->ih_flags & IH_DEAD) { if (comma) db_printf(", "); db_printf("DEAD"); comma = 1; } if (ih->ih_flags & IH_MPSAFE) { if (comma) db_printf(", "); db_printf("MPSAFE"); comma = 1; } if (ih->ih_need) { if (comma) db_printf(", "); db_printf("NEED"); } db_printf("}"); } db_printf("\n"); } /* * Dump details about a event. */ void db_dump_intr_event(struct intr_event *ie, int handlers) { struct intr_handler *ih; struct intr_thread *it; int comma; db_printf("%s ", ie->ie_fullname); it = ie->ie_thread; if (it != NULL) db_printf("(pid %d)", it->it_thread->td_proc->p_pid); else db_printf("(no thread)"); if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || (it != NULL && it->it_need)) { db_printf(" {"); comma = 0; if (ie->ie_flags & IE_SOFT) { db_printf("SOFT"); comma = 1; } if (ie->ie_flags & IE_ENTROPY) { if (comma) db_printf(", "); db_printf("ENTROPY"); comma = 1; } if (ie->ie_flags & IE_ADDING_THREAD) { if (comma) db_printf(", "); db_printf("ADDING_THREAD"); comma = 1; } if (it != NULL && it->it_need) { if (comma) db_printf(", "); db_printf("NEED"); } db_printf("}"); } db_printf("\n"); if (handlers) TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) db_dump_intrhand(ih); } /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(intr, db_show_intr) { struct intr_event *ie; int all, verbose; verbose = index(modif, 'v') != NULL; all = index(modif, 'a') != NULL; TAILQ_FOREACH(ie, &event_list, ie_list) { if (!all && TAILQ_EMPTY(&ie->ie_handlers)) continue; db_dump_intr_event(ie, verbose); if (db_pager_quit) break; } } #endif /* DDB */ /* * Start standard software interrupt threads */ static void start_softintr(void *dummy) { struct proc *p; if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, INTR_MPSAFE, &softclock_ih) || swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) panic("died while creating standard software ithreads"); p = clk_intr_event->ie_thread->it_thread->td_proc; PROC_LOCK(p); p->p_flag |= P_NOLOAD; PROC_UNLOCK(p); } SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL) /* * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. * The data for this machine dependent, and the declarations are in machine * dependent code. The layout of intrnames and intrcnt however is machine * independent. 
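Because both nodes are exported as opaque blobs, a userland consumer sizes them first and then walks intrnames as a packed sequence of NUL-terminated, space-padded slots, the same walk the DDB command below performs. A small FreeBSD program in the spirit of systat(1), with error handling trimmed:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *names, *cp;
	unsigned long *counts;
	size_t nlen, clen, i, n;

	if (sysctlbyname("hw.intrnames", NULL, &nlen, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrcnt", NULL, &clen, NULL, 0) == -1)
		return (1);
	names = malloc(nlen);
	counts = malloc(clen);
	if (names == NULL || counts == NULL ||
	    sysctlbyname("hw.intrnames", names, &nlen, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrcnt", counts, &clen, NULL, 0) == -1)
		return (1);
	n = clen / sizeof(*counts);
	cp = names;
	for (i = 0; i < n && cp < names + nlen; i++) {
		if (counts[i] != 0)		/* skip idle sources */
			printf("%s\t%lu\n", cp, counts[i]);
		cp += strlen(cp) + 1;		/* next fixed-width slot */
	}
	free(names);
	free(counts);
	return (0);
}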
* * We do not know the length of intrcnt and intrnames at compile time, so * calculate things at run time. */ static int sysctl_intrnames(SYSCTL_HANDLER_ARGS) { return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, req)); } SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, sysctl_intrnames, "", "Interrupt Names"); static int sysctl_intrcnt(SYSCTL_HANDLER_ARGS) { return (sysctl_handle_opaque(oidp, intrcnt, (char *)eintrcnt - (char *)intrcnt, req)); } SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); #ifdef DDB /* * DDB command to dump the interrupt statistics. */ DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) { u_long *i; char *cp; cp = intrnames; for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { if (*cp == '\0') break; if (*i != 0) db_printf("%s\t%lu\n", cp, *i); cp += strlen(cp) + 1; } } #endif Index: head/sys/powerpc/powerpc/intr_machdep.c =================================================================== --- head/sys/powerpc/powerpc/intr_machdep.c (revision 170161) +++ head/sys/powerpc/powerpc/intr_machdep.c (revision 170162) @@ -1,249 +1,258 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_STRAY_LOG 5 MALLOC_DEFINE(M_INTR, "intr", "interrupt handler data"); struct ppc_intr { struct intr_event *event; long *cntp; int cntidx; }; static struct mtx ppc_intrs_lock; static struct ppc_intr **ppc_intrs; static u_int ppc_nintrs; static int intrcnt_index; static void (*irq_enable)(uintptr_t); static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } void intr_init(void (*handler)(void), int nirq, void (*irq_e)(uintptr_t), void (*irq_d)(uintptr_t)) { uint32_t msr; if (ppc_intrs != NULL) panic("intr_init: interrupts initialized twice\n"); ppc_nintrs = nirq; ppc_intrs = malloc(nirq * sizeof(struct ppc_intr *), M_INTR, M_NOWAIT|M_ZERO); if (ppc_intrs == NULL) panic("intr_init: unable to allocate interrupt handler array"); mtx_init(&ppc_intrs_lock, "intr table", NULL, MTX_SPIN); irq_enable = irq_e; intrcnt_setname("???", 0); intrcnt_index = 1; msr = mfmsr(); mtmsr(msr & ~PSL_EE); ext_intr_install(handler); mtmsr(msr); } int inthand_add(const char *name, u_int irq, driver_filter_t *filter, void (*handler)(void *), void *arg, int flags, void **cookiep) { struct ppc_intr *i, *orphan; u_int idx; int error; /* * Work around a race where more than one CPU may be registering * handlers on the same IRQ at the same time. 
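The registration path that follows is a create-outside-the-lock, publish-under-the-lock pattern: the loser of the race destroys its orphan rather than holding a spin mutex across intr_event_create(). The same shape as a runnable userland model, with invented names and malloc() standing in for the expensive creation step:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];			/* plays ppc_intrs[] */

static void *
get_or_create(int irq)
{
	void *i, *orphan;

	pthread_mutex_lock(&table_lock);
	i = table[irq];
	pthread_mutex_unlock(&table_lock);
	if (i != NULL)
		return (i);
	i = malloc(64);			/* expensive work outside the lock */
	if (i == NULL)
		return (NULL);
	pthread_mutex_lock(&table_lock);
	if (table[irq] != NULL) {	/* somebody else won the race */
		orphan = i;
		i = table[irq];
		pthread_mutex_unlock(&table_lock);
		free(orphan);		/* discard our duplicate */
	} else {
		table[irq] = i;		/* publish under the lock */
		pthread_mutex_unlock(&table_lock);
	}
	return (i);
}

int
main(void)
{
	printf("%p\n", get_or_create(3));
	printf("%p\n", get_or_create(3));	/* same pointer, no duplicate */
	return (0);
}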
*/ mtx_lock_spin(&ppc_intrs_lock); i = ppc_intrs[irq]; mtx_unlock_spin(&ppc_intrs_lock); if (i == NULL) { i = malloc(sizeof(*i), M_INTR, M_NOWAIT); if (i == NULL) return (ENOMEM); error = intr_event_create(&i->event, (void *)irq, 0, (void (*)(void *))irq_enable, "irq%d:", irq); if (error) { free(i, M_INTR); return (error); } mtx_lock_spin(&ppc_intrs_lock); if (ppc_intrs[irq] != NULL) { orphan = i; i = ppc_intrs[irq]; mtx_unlock_spin(&ppc_intrs_lock); intr_event_destroy(orphan->event); free(orphan, M_INTR); } else { ppc_intrs[irq] = i; idx = intrcnt_index++; mtx_unlock_spin(&ppc_intrs_lock); i->cntidx = idx; i->cntp = &intrcnt[idx]; intrcnt_setname(i->event->ie_fullname, idx); } } error = intr_event_add_handler(i->event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (!error) intrcnt_setname(i->event->ie_fullname, i->cntidx); return (error); } int inthand_remove(u_int irq, void *cookie) { return (intr_event_remove_handler(cookie)); } void intr_handle(u_int irq) { struct ppc_intr *i; struct intr_event *ie; struct intr_handler *ih; - int error, sched; + int error, sched, ret; i = ppc_intrs[irq]; if (i == NULL) goto stray; atomic_add_long(i->cntp, 1); ie = i->event; KASSERT(ie != NULL, ("%s: interrupt without an event", __func__)); if (TAILQ_EMPTY(&ie->ie_handlers)) goto stray; /* * Execute all fast interrupt handlers directly without Giant. Note * that this means that any fast interrupt handler must be MP safe. */ + ret = 0; sched = 0; critical_enter(); TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { sched = 1; continue; } CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, ih->ih_filter, ih->ih_argument, ih->ih_name); - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!sched) { + if (ret == FILTER_SCHEDULE_THREAD) + sched = 1; + } } critical_exit(); if (sched) { error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("%s: impossible stray interrupt", __func__)); } else irq_enable(irq); return; stray: atomic_add_long(&intrcnt[0], 1); if (intrcnt[0] <= MAX_STRAY_LOG) { printf("stray irq %d\n", irq); if (intrcnt[0] >= MAX_STRAY_LOG) { printf("got %d stray interrupts, not logging anymore\n", MAX_STRAY_LOG); } } } Index: head/sys/sparc64/sparc64/intr_machdep.c =================================================================== --- head/sys/sparc64/sparc64/intr_machdep.c (revision 170161) +++ head/sys/sparc64/sparc64/intr_machdep.c (revision 170162) @@ -1,345 +1,354 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_STRAY_LOG 5 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector)); ih_func_t *intr_handlers[PIL_MAX]; uint16_t pil_countp[PIL_MAX]; struct intr_vector intr_vectors[IV_MAX]; uint16_t intr_countp[IV_MAX]; static u_long intr_stray_count[IV_MAX]; static char *pil_names[] = { "stray", "low", /* PIL_LOW */ "ithrd", /* PIL_ITHREAD */ "rndzvs", /* PIL_RENDEZVOUS */ "ast", /* PIL_AST */ "stop", /* PIL_STOP */ "stray", "stray", "stray", "stray", "stray", "stray", "stray", "fast", /* PIL_FAST */ "tick", /* PIL_TICK */ }; /* protect the intr_vectors table */ static struct mtx intr_table_lock; static void intr_execute_handlers(void *); static void intr_stray_level(struct trapframe *); static void intr_stray_vector(void *); static int intrcnt_setname(const char *, int); static void intrcnt_updatename(int, const char *, int); /* * not MPSAFE */ static void intrcnt_updatename(int vec, const char *name, int ispil) { static int intrcnt_index, stray_pil_index, stray_vec_index; int name_index; if (intrnames[0] == '\0') { /* for bitbucket */ if (bootverbose) printf("initalizing intr_countp\n"); intrcnt_setname("???", intrcnt_index++); stray_vec_index = intrcnt_index++; intrcnt_setname("stray", stray_vec_index); for (name_index = 0; name_index < IV_MAX; name_index++) intr_countp[name_index] = stray_vec_index; stray_pil_index = intrcnt_index++; intrcnt_setname("pil", stray_pil_index); for (name_index = 0; name_index < PIL_MAX; name_index++) pil_countp[name_index] = stray_pil_index; } if (name == NULL) name = "???"; if (!ispil && intr_countp[vec] != stray_vec_index) name_index = intr_countp[vec]; else if (ispil && pil_countp[vec] != stray_pil_index) name_index = pil_countp[vec]; else name_index = intrcnt_index++; if (intrcnt_setname(name, name_index)) name_index = 0; if (!ispil) intr_countp[vec] = name_index; else pil_countp[vec] = name_index; } static int intrcnt_setname(const char *name, int index) { if (intrnames + (MAXCOMLEN + 1) * index >= eintrnames) return (E2BIG); snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); return (0); } void intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva) { char pilname[MAXCOMLEN + 1]; u_long ps; ps = intr_disable(); if (vec != -1) { intr_vectors[vec].iv_func = ivf; intr_vectors[vec].iv_arg = iva; intr_vectors[vec].iv_pri = pri; intr_vectors[vec].iv_vec = vec; } snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]); intrcnt_updatename(pri, pilname, 1); intr_handlers[pri] = ihf; intr_restore(ps); } static void intr_stray_level(struct trapframe *tf) { printf("stray level interrupt %ld\n", tf->tf_level); } static void intr_stray_vector(void *cookie) { struct intr_vector *iv; iv = cookie; if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) { printf("stray vector interrupt %d\n", iv->iv_vec); intr_stray_count[iv->iv_vec]++; if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG) printf("got %d stray interrupt %d's: not logging " "anymore\n", MAX_STRAY_LOG, iv->iv_vec); } } void intr_init1() { int i; /* Mark all interrupts as being stray. 
*/ for (i = 0; i < PIL_MAX; i++) intr_handlers[i] = intr_stray_level; for (i = 0; i < IV_MAX; i++) { intr_vectors[i].iv_func = intr_stray_vector; intr_vectors[i].iv_arg = &intr_vectors[i]; intr_vectors[i].iv_pri = PIL_LOW; intr_vectors[i].iv_vec = i; } intr_handlers[PIL_LOW] = intr_fast; } void intr_init2() { mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN); } static void intr_execute_handlers(void *cookie) { struct intr_vector *iv; struct intr_event *ie; struct intr_handler *ih; - int error, thread; + int error, thread, ret; iv = cookie; ie = iv->iv_event; if (ie == NULL) { intr_stray_vector(iv); return; } /* Execute fast interrupt handlers directly. */ + ret = 0; thread = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL); CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__, ih->ih_filter, ih->ih_argument); - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } /* Schedule a heavyweight interrupt process. */ if (thread) error = intr_event_schedule_thread(ie); else if (TAILQ_EMPTY(&ie->ie_handlers)) error = EINVAL; else error = 0; if (error == EINVAL) intr_stray_vector(iv); } int inthand_add(const char *name, int vec, driver_filter_t *filt, void (*handler)(void *), void *arg, int flags, void **cookiep) { struct intr_vector *iv; struct intr_event *ie; /* descriptor for the IRQ */ struct intr_event *orphan; int errcode; /* * Work around a race where more than one CPU may be registering * handlers on the same IRQ at the same time. */ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); if (ie == NULL) { errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL, "vec%d:", vec); if (errcode) return (errcode); mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) { iv->iv_event = ie; mtx_unlock_spin(&intr_table_lock); } else { orphan = ie; ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); intr_event_destroy(orphan); } } errcode = intr_event_add_handler(ie, name, filt, handler, arg, intr_priority(flags), flags, cookiep); if (errcode) return (errcode); intr_setup(filt != NULL ? PIL_FAST : PIL_ITHREAD, intr_fast, vec, intr_execute_handlers, iv); intr_stray_count[vec] = 0; intrcnt_updatename(vec, ie->ie_fullname, 0); return (0); } int inthand_remove(int vec, void *cookie) { struct intr_vector *iv; int error; error = intr_event_remove_handler(cookie); if (error == 0) { /* * XXX: maybe this should be done regardless of whether * intr_event_remove_handler() succeeded? * XXX: aren't the PIL's backwards below? */ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) intr_setup(PIL_ITHREAD, intr_fast, vec, intr_stray_vector, iv); else intr_setup(PIL_LOW, intr_fast, vec, intr_execute_handlers, iv); mtx_unlock_spin(&intr_table_lock); } return (error); } Index: head/sys/sun4v/sun4v/intr_machdep.c =================================================================== --- head/sys/sun4v/sun4v/intr_machdep.c (revision 170161) +++ head/sys/sun4v/sun4v/intr_machdep.c (revision 170162) @@ -1,497 +1,506 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PANIC_IF(exp) if (unlikely(exp)) {panic("%s: %s:%d", #exp, __FILE__, __LINE__);} #define MAX_STRAY_LOG 5 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector)); ih_func_t *intr_handlers[PIL_MAX]; uint16_t pil_countp[PIL_MAX]; struct intr_vector intr_vectors[IV_MAX]; uint16_t intr_countp[IV_MAX]; static u_long intr_stray_count[IV_MAX]; struct ithread_vector_handler { iv_func_t *ivh_handler; void *ivh_arg; u_int ivh_vec; }; static char *pil_names[] = { "stray", "low", /* PIL_LOW */ "ithrd", /* PIL_ITHREAD */ "rndzvs", /* PIL_RENDEZVOUS */ "ast", /* PIL_AST */ "stop", /* PIL_STOP */ "preempt", /* PIL_PREEMPT */ "stray", "stray", "stray", "stray", "stray", "stray", "fast", /* PIL_FAST */ "tick", /* PIL_TICK */ }; /* * XXX SUN4V_FIXME - the queue size values should * really be calculated based on the size of the partition * */ int cpu_q_entries = 128; int dev_q_entries = 128; static vm_offset_t *mondo_data_array; static vm_offset_t *cpu_list_array; static vm_offset_t *cpu_q_array; static vm_offset_t *dev_q_array; static vm_offset_t *rq_array; static vm_offset_t *nrq_array; static int cpu_list_size; /* protect the intr_vectors table */ static struct mtx intr_table_lock; static void intr_execute_handlers(void *); static void intr_stray_level(struct trapframe *); static void intr_stray_vector(void *); static int intrcnt_setname(const char *, int); static void intrcnt_updatename(int, const char *, int); static void cpu_intrq_alloc(void); /* * not MPSAFE */ static void intrcnt_updatename(int vec, const char *name, int ispil) { static int intrcnt_index, stray_pil_index, stray_vec_index; int name_index; if (intrnames[0] == '\0') { /* for bitbucket */ if (bootverbose) printf("initalizing intr_countp\n"); intrcnt_setname("???", intrcnt_index++); stray_vec_index = intrcnt_index++; intrcnt_setname("stray", stray_vec_index); for (name_index = 0; name_index < IV_MAX; name_index++) intr_countp[name_index] = stray_vec_index; stray_pil_index = intrcnt_index++; intrcnt_setname("pil", stray_pil_index); for (name_index = 0; name_index < PIL_MAX; name_index++) pil_countp[name_index] = stray_pil_index; } if (name == NULL) name = "???"; if (!ispil && intr_countp[vec] != stray_vec_index) name_index = intr_countp[vec]; else if (ispil && pil_countp[vec] != stray_pil_index) name_index = pil_countp[vec]; else name_index = intrcnt_index++; if (intrcnt_setname(name, name_index)) name_index = 0; if (!ispil) intr_countp[vec] = name_index; else pil_countp[vec] = name_index; } static int intrcnt_setname(const char *name, int index) { if (intrnames + (MAXCOMLEN + 1) * index >= eintrnames) return (E2BIG); snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); return (0); } void intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva) { char pilname[MAXCOMLEN + 1]; u_long ps; ps = intr_disable_all(); if (vec != -1) { intr_vectors[vec].iv_func = ivf; intr_vectors[vec].iv_arg = iva; intr_vectors[vec].iv_pri = pri; intr_vectors[vec].iv_vec = vec; } snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]); intrcnt_updatename(pri, pilname, 1); intr_handlers[pri] = ihf; intr_restore_all(ps); } static void intr_stray_level(struct 
trapframe *tf) { printf("stray level interrupt - pil=%ld\n", tf->tf_pil); } static void intr_stray_vector(void *cookie) { struct intr_vector *iv; iv = cookie; if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) { printf("stray vector interrupt %d\n", iv->iv_vec); intr_stray_count[iv->iv_vec]++; if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG) printf("got %d stray interrupt %d's: not logging " "anymore\n", MAX_STRAY_LOG, iv->iv_vec); } } static void intr_init(void) { int i; /* Mark all interrupts as being stray. */ for (i = 0; i < PIL_MAX; i++) intr_handlers[i] = intr_stray_level; for (i = 0; i < IV_MAX; i++) { intr_vectors[i].iv_func = intr_stray_vector; intr_vectors[i].iv_arg = &intr_vectors[i]; intr_vectors[i].iv_pri = PIL_LOW; intr_vectors[i].iv_vec = i; } intr_handlers[PIL_LOW] = intr_fast; #ifdef SMP intr_handlers[PIL_AST] = cpu_ipi_ast; intr_handlers[PIL_RENDEZVOUS] = (ih_func_t *)smp_rendezvous_action; intr_handlers[PIL_STOP]= cpu_ipi_stop; intr_handlers[PIL_PREEMPT]= cpu_ipi_preempt; #endif mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN); cpu_intrq_alloc(); cpu_intrq_init(); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); static void intr_execute_handlers(void *cookie) { struct intr_vector *iv; struct intr_event *ie; struct intr_handler *ih; - int fast, thread; + int fast, thread, ret; iv = cookie; ie = iv->iv_event; if (ie == NULL) { intr_stray_vector(iv); return; } + ret = 0; fast = thread = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter == NULL) { thread = 1; continue; } MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL); CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__, ih->ih_filter, ih->ih_argument); - ih->ih_filter(ih->ih_argument); + ret = ih->ih_filter(ih->ih_argument); fast = 1; + /* + * Wrapper handler special case: see + * i386/intr_machdep.c::intr_execute_handlers() + */ + if (!thread) { + if (ret == FILTER_SCHEDULE_THREAD) + thread = 1; + } } /* Schedule a heavyweight interrupt process. */ if (thread) intr_event_schedule_thread(ie); else if (TAILQ_EMPTY(&ie->ie_handlers)) intr_stray_vector(iv); if (fast) hv_intr_setstate(iv->iv_vec, HV_INTR_IDLE_STATE); } static void ithread_wrapper(void *arg) { struct ithread_vector_handler *ivh = (struct ithread_vector_handler *)arg; ivh->ivh_handler(ivh->ivh_arg); /* re-enable interrupt */ hv_intr_setstate(ivh->ivh_vec, HV_INTR_IDLE_STATE); } int inthand_add(const char *name, int vec, driver_filter_t *filt, void (*handler)(void *), void *arg, int flags, void **cookiep) { struct intr_vector *iv; struct intr_event *ie; /* descriptor for the IRQ */ struct intr_event *orphan; struct ithread_vector_handler *ivh; int errcode, pil; if (filt != NULL && handler != NULL) { printf("both filt and handler set is not valid\n"); return (EINVAL); } /* * Work around a race where more than one CPU may be registering * handlers on the same IRQ at the same time. 
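ithread_wrapper() above exists because sun4v must touch the hypervisor again after the threaded handler finishes; the wrapper owns that post-processing so that the generic intr_event code never needs to know about hv_intr_setstate(). The same shape in standalone form, with stand-in names and a printf in place of the hypervisor call:

#include <stdio.h>

struct vec_handler {			/* plays ithread_vector_handler */
	void (*handler)(void *);
	void *arg;
	unsigned vec;
};

static void
reenable(unsigned vec)			/* plays hv_intr_setstate(vec, IDLE) */
{
	printf("vector %u re-enabled\n", vec);
}

static void
wrapper(void *arg)			/* plays ithread_wrapper() */
{
	struct vec_handler *vh = arg;

	vh->handler(vh->arg);		/* run the driver's real handler */
	reenable(vh->vec);		/* then restore the interrupt */
}

static void
drv_handler(void *arg)
{
	printf("drv: %s\n", (const char *)arg);
}

int
main(void)
{
	struct vec_handler vh = { drv_handler, "hello", 7 };

	wrapper(&vh);
	return (0);
}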
*/ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); if (ie == NULL) { errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL, "vec%d:", vec); if (errcode) return (errcode); mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) { iv->iv_event = ie; mtx_unlock_spin(&intr_table_lock); } else { orphan = ie; ie = iv->iv_event; mtx_unlock_spin(&intr_table_lock); intr_event_destroy(orphan); } } if (filt == NULL) { ivh = (struct ithread_vector_handler *) malloc(sizeof(struct ithread_vector_handler), M_DEVBUF, M_WAITOK); ivh->ivh_handler = (driver_intr_t *)handler; ivh->ivh_arg = arg; ivh->ivh_vec = vec; errcode = intr_event_add_handler(ie, name, NULL, ithread_wrapper, ivh, intr_priority(flags), flags, cookiep); } else { ivh = NULL; errcode = intr_event_add_handler(ie, name, filt, NULL, arg, intr_priority(flags), flags, cookiep); } if (errcode) { if (ivh) free(ivh, M_DEVBUF); return (errcode); } pil = (filt != NULL) ? PIL_FAST : PIL_ITHREAD; intr_setup(pil, intr_fast, vec, intr_execute_handlers, iv); intr_stray_count[vec] = 0; intrcnt_updatename(vec, ie->ie_fullname, 0); return (0); } int inthand_remove(int vec, void *cookie) { struct intr_vector *iv; int error; error = intr_event_remove_handler(cookie); if (error == 0) { /* * XXX: maybe this should be done regardless of whether * intr_event_remove_handler() succeeded? * XXX: aren't the PIL's backwards below? */ iv = &intr_vectors[vec]; mtx_lock_spin(&intr_table_lock); if (iv->iv_event == NULL) intr_setup(PIL_ITHREAD, intr_fast, vec, intr_stray_vector, iv); else intr_setup(PIL_LOW, intr_fast, vec, intr_execute_handlers, iv); mtx_unlock_spin(&intr_table_lock); } return (error); } /* * Allocate and register intrq fields */ static void cpu_intrq_alloc(void) { mondo_data_array = malloc(INTR_REPORT_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(mondo_data_array == NULL); cpu_list_size = CPU_LIST_SIZE > INTR_REPORT_SIZE ? 
CPU_LIST_SIZE : INTR_REPORT_SIZE; cpu_list_array = malloc(cpu_list_size*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(cpu_list_array == NULL); cpu_q_array = malloc(INTR_CPU_Q_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(cpu_q_array == NULL); dev_q_array = malloc(INTR_DEV_Q_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(dev_q_array == NULL); rq_array = malloc(2*CPU_RQ_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(rq_array == NULL); nrq_array = malloc(2*CPU_NRQ_SIZE*MAXCPU, M_DEVBUF, M_WAITOK | M_ZERO); PANIC_IF(nrq_array == NULL); } void cpu_intrq_init() { uint64_t error; pcpup->pc_mondo_data = (vm_offset_t *) ((char *)mondo_data_array + curcpu*INTR_REPORT_SIZE); pcpup->pc_mondo_data_ra = vtophys(pcpup->pc_mondo_data); pcpup->pc_cpu_q = (vm_offset_t *)((char *)cpu_q_array + curcpu*INTR_CPU_Q_SIZE); pcpup->pc_cpu_q_ra = vtophys(pcpup->pc_cpu_q); pcpup->pc_cpu_q_size = INTR_CPU_Q_SIZE; pcpup->pc_dev_q = (vm_offset_t *)((char *)dev_q_array + curcpu*INTR_DEV_Q_SIZE); pcpup->pc_dev_q_ra = vtophys(pcpup->pc_dev_q); pcpup->pc_dev_q_size = INTR_DEV_Q_SIZE; pcpup->pc_rq = (vm_offset_t *)((char *)rq_array + curcpu*2*CPU_RQ_SIZE); pcpup->pc_rq_ra = vtophys(pcpup->pc_rq); pcpup->pc_rq_size = CPU_RQ_SIZE; pcpup->pc_nrq = (vm_offset_t *)((char *)nrq_array + curcpu*2*CPU_NRQ_SIZE); pcpup->pc_nrq_ra = vtophys(pcpup->pc_nrq); pcpup->pc_nrq_size = CPU_NRQ_SIZE; error = hv_cpu_qconf(Q(CPU_MONDO_QUEUE_HEAD), pcpup->pc_cpu_q_ra, cpu_q_entries); if (error != H_EOK) panic("cpu_mondo queue configuration failed: %lu va=%p ra=0x%lx", error, pcpup->pc_cpu_q, pcpup->pc_cpu_q_ra); error = hv_cpu_qconf(Q(DEV_MONDO_QUEUE_HEAD), pcpup->pc_dev_q_ra, dev_q_entries); if (error != H_EOK) panic("dev_mondo queue configuration failed: %lu", error); error = hv_cpu_qconf(Q(RESUMABLE_ERROR_QUEUE_HEAD), pcpup->pc_rq_ra, CPU_RQ_ENTRIES); if (error != H_EOK) panic("resumable error queue configuration failed: %lu", error); error = hv_cpu_qconf(Q(NONRESUMABLE_ERROR_QUEUE_HEAD), pcpup->pc_nrq_ra, CPU_NRQ_ENTRIES); if (error != H_EOK) panic("non-resumable error queue configuration failed: %lu", error); }
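cpu_intrq_alloc() makes one contiguous allocation per queue type, and cpu_intrq_init() carves out each CPU's slice at offset curcpu times the per-CPU size, handing the hypervisor the real (physical) address of that slice via vtophys(). The offset arithmetic in isolation, with stand-in sizes rather than the sun4v constants:

#include <stdio.h>
#include <stdlib.h>

#define MAXCPU		4		/* stand-in values only */
#define INTR_CPU_Q_SIZE	8192

int
main(void)
{
	char *cpu_q_array;
	int cpu;

	/* one contiguous allocation, one fixed-size slice per CPU */
	cpu_q_array = calloc(MAXCPU, INTR_CPU_Q_SIZE);
	if (cpu_q_array == NULL)
		return (1);
	for (cpu = 0; cpu < MAXCPU; cpu++)
		printf("cpu %d queue at offset %d\n",
		    cpu, cpu * INTR_CPU_Q_SIZE);
	free(cpu_q_array);
	return (0);
}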