Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/subr_intr.c
/*- | /*- | ||||
* Copyright (c) 2012-2014 Jakub Wojciech Klama <jceel@FreeBSD.org>. | * Copyright (c) 2015-2016 Svatopluk Kraus | ||||
* Copyright (c) 2015 Svatopluk Kraus | * Copyright (c) 2015-2016 Michal Meloun | ||||
* Copyright (c) 2015 Michal Meloun | |||||
* All rights reserved. | * All rights reserved. | ||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
* modification, are permitted provided that the following conditions | * modification, are permitted provided that the following conditions | ||||
* are met: | * are met: | ||||
* 1. Redistributions of source code must retain the above copyright | * 1. Redistributions of source code must retain the above copyright | ||||
* notice, this list of conditions and the following disclaimer. | * notice, this list of conditions and the following disclaimer. | ||||
* 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright | ||||
* notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the | ||||
* documentation and/or other materials provided with the distribution. | * documentation and/or other materials provided with the distribution. | ||||
* | * | ||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | ||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | ||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||||
* SUCH DAMAGE. | * SUCH DAMAGE. | ||||
* | |||||
* $FreeBSD$ | |||||
*/ | */ | ||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
/* | /* | ||||
* New-style Interrupt Framework | * New-style Interrupt Framework | ||||
* | * | ||||
* TODO: - to support IPI (PPI) enabling on other CPUs if already started | * TODO: - to support IPI (PPI) enabling on other CPUs if already started | ||||
* - to complete things for removable PICs | * - to complete things for removable PICs | ||||
*/ | */ | ||||
#include "opt_acpi.h" | |||||
#include "opt_ddb.h" | #include "opt_ddb.h" | ||||
#include "opt_platform.h" | #include "opt_platform.h" | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/syslog.h> | #include <sys/syslog.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | |||||
/* Interrupt source definition. */ | /* Interrupt source definition. */ | ||||
static struct mtx isrc_table_lock; | static struct mtx isrc_table_lock; | ||||
static struct intr_irqsrc *irq_sources[NIRQ]; | static struct intr_irqsrc *irq_sources[NIRQ]; | ||||
u_int irq_next_free; | u_int irq_next_free; | ||||
#define IRQ_INVALID nitems(irq_sources) | #define IRQ_INVALID nitems(irq_sources) | ||||
/* | |||||
* XXX - All stuff around struct intr_dev_data is considered as temporary | |||||
* until better place for storing struct intr_map_data will be find. | |||||
* | |||||
* For now, there are two global interrupt numbers spaces: | |||||
* <0, NIRQ) ... interrupts without config data | |||||
* managed in irq_sources[] | |||||
* IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data | |||||
* managed in intr_ddata_tab[] | |||||
* | |||||
* Read intr_ddata_lookup() to see how these spaces are worked with. | |||||
* Note that each interrupt number from second space duplicates some number | |||||
* from first space at this moment. An interrupt number from first space can | |||||
* be duplicated even multiple times in second space. | |||||
*/ | |||||
struct intr_dev_data { | |||||
device_t idd_dev; | |||||
intptr_t idd_xref; | |||||
u_int idd_irq; | |||||
struct intr_map_data idd_data; | |||||
struct intr_irqsrc * idd_isrc; | |||||
}; | |||||
static struct intr_dev_data *intr_ddata_tab[2 * NIRQ]; | |||||
static u_int intr_ddata_first_unused; | |||||
#define IRQ_DDATA_BASE 10000 | |||||
CTASSERT(IRQ_DDATA_BASE > IRQ_INVALID); | |||||
#ifdef SMP | #ifdef SMP | ||||
static boolean_t irq_assign_cpu = FALSE; | static boolean_t irq_assign_cpu = FALSE; | ||||
#endif | #endif | ||||
/* | /* | ||||
* - 2 counters for each I/O interrupt. | * - 2 counters for each I/O interrupt. | ||||
* - MAXCPU counters for each IPI counters for SMP. | * - MAXCPU counters for each IPI counters for SMP. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Virtualization for interrupt source interrupt counter increment. | * Virtualization for interrupt source interrupt counter increment. | ||||
*/ | */ | ||||
static inline void | static inline void | ||||
isrc_increment_count(struct intr_irqsrc *isrc) | isrc_increment_count(struct intr_irqsrc *isrc) | ||||
{ | { | ||||
/* | if (isrc->isrc_flags & INTR_ISRCF_PPI) | ||||
* XXX - It should be atomic for PPI interrupts. It was proven that | atomic_add_long(&isrc->isrc_count[0], 1); | ||||
* the lost is measurable easily for timer PPI interrupts. | else | ||||
*/ | |||||
isrc->isrc_count[0]++; | isrc->isrc_count[0]++; | ||||
/*atomic_add_long(&isrc->isrc_count[0], 1);*/ | |||||
} | } | ||||
/* | /* | ||||
* Virtualization for interrupt source interrupt stray counter increment. | * Virtualization for interrupt source interrupt stray counter increment. | ||||
*/ | */ | ||||
static inline void | static inline void | ||||
isrc_increment_straycount(struct intr_irqsrc *isrc) | isrc_increment_straycount(struct intr_irqsrc *isrc) | ||||
{ | { | ||||
Show All 38 Lines | isrc_setup_counters(struct intr_irqsrc *isrc) | ||||
* interrupt sources !!! | * interrupt sources !!! | ||||
*/ | */ | ||||
index = atomic_fetchadd_int(&intrcnt_index, 2); | index = atomic_fetchadd_int(&intrcnt_index, 2); | ||||
isrc->isrc_index = index; | isrc->isrc_index = index; | ||||
isrc->isrc_count = &intrcnt[index]; | isrc->isrc_count = &intrcnt[index]; | ||||
isrc_update_name(isrc, NULL); | isrc_update_name(isrc, NULL); | ||||
} | } | ||||
/* | |||||
* Virtualization for interrupt source interrupt counters release. | |||||
*/ | |||||
static void | |||||
isrc_release_counters(struct intr_irqsrc *isrc) | |||||
{ | |||||
panic("%s: not implemented", __func__); | |||||
} | |||||
#ifdef SMP | #ifdef SMP | ||||
/* | /* | ||||
* Virtualization for interrupt source IPI counters setup. | * Virtualization for interrupt source IPI counters setup. | ||||
*/ | */ | ||||
u_long * | u_long * | ||||
intr_ipi_setup_counters(const char *name) | intr_ipi_setup_counters(const char *name) | ||||
{ | { | ||||
u_int index, i; | u_int index, i; | ||||
Show All 30 Lines | intr_irq_handler(struct trapframe *tf) | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
/* | /* | ||||
* interrupt controller dispatch function for interrupts. It should | * interrupt controller dispatch function for interrupts. It should | ||||
* be called straight from the interrupt controller, when associated interrupt | * be called straight from the interrupt controller, when associated interrupt | ||||
* source is learned. | * source is learned. | ||||
*/ | */ | ||||
void | int | ||||
intr_irq_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) | intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) | ||||
{ | { | ||||
KASSERT(isrc != NULL, ("%s: no source", __func__)); | KASSERT(isrc != NULL, ("%s: no source", __func__)); | ||||
isrc_increment_count(isrc); | isrc_increment_count(isrc); | ||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
if (isrc->isrc_filter != NULL) { | if (isrc->isrc_filter != NULL) { | ||||
int error; | int error; | ||||
error = isrc->isrc_filter(isrc->isrc_arg, tf); | error = isrc->isrc_filter(isrc->isrc_arg, tf); | ||||
PIC_POST_FILTER(isrc->isrc_dev, isrc); | PIC_POST_FILTER(isrc->isrc_dev, isrc); | ||||
if (error == FILTER_HANDLED) | if (error == FILTER_HANDLED) | ||||
return; | return (0); | ||||
} else | } else | ||||
#endif | #endif | ||||
if (isrc->isrc_event != NULL) { | if (isrc->isrc_event != NULL) { | ||||
if (intr_event_handle(isrc->isrc_event, tf) == 0) | if (intr_event_handle(isrc->isrc_event, tf) == 0) | ||||
return; | return (0); | ||||
} | } | ||||
isrc_increment_straycount(isrc); | isrc_increment_straycount(isrc); | ||||
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); | return (EINVAL); | ||||
device_printf(isrc->isrc_dev, "stray irq <%s> disabled", | |||||
isrc->isrc_name); | |||||
} | } | ||||
/* | /* | ||||
* Allocate interrupt source. | |||||
*/ | |||||
struct intr_irqsrc * | |||||
intr_isrc_alloc(u_int type, u_int extsize) | |||||
{ | |||||
struct intr_irqsrc *isrc; | |||||
isrc = malloc(sizeof(*isrc) + extsize, M_INTRNG, M_WAITOK | M_ZERO); | |||||
isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ | |||||
isrc->isrc_type = type; | |||||
isrc->isrc_nspc_type = INTR_IRQ_NSPC_NONE; | |||||
isrc->isrc_trig = INTR_TRIGGER_CONFORM; | |||||
isrc->isrc_pol = INTR_POLARITY_CONFORM; | |||||
CPU_ZERO(&isrc->isrc_cpu); | |||||
return (isrc); | |||||
} | |||||
/* | |||||
* Free interrupt source. | |||||
*/ | |||||
void | |||||
intr_isrc_free(struct intr_irqsrc *isrc) | |||||
{ | |||||
free(isrc, M_INTRNG); | |||||
} | |||||
void | |||||
intr_irq_set_name(struct intr_irqsrc *isrc, const char *fmt, ...) | |||||
{ | |||||
va_list ap; | |||||
va_start(ap, fmt); | |||||
vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); | |||||
va_end(ap); | |||||
} | |||||
/* | |||||
* Alloc unique interrupt number (resource handle) for interrupt source. | * Alloc unique interrupt number (resource handle) for interrupt source. | ||||
* | * | ||||
* There could be various strategies how to allocate free interrupt number | * There could be various strategies how to allocate free interrupt number | ||||
* (resource handle) for new interrupt source. | * (resource handle) for new interrupt source. | ||||
* | * | ||||
* 1. Handles are always allocated forward, so handles are not recycled | * 1. Handles are always allocated forward, so handles are not recycled | ||||
* immediately. However, if only one free handle left which is reused | * immediately. However, if only one free handle left which is reused | ||||
* constantly... | * constantly... | ||||
*/ | */ | ||||
static int | static inline int | ||||
isrc_alloc_irq_locked(struct intr_irqsrc *isrc) | isrc_alloc_irq(struct intr_irqsrc *isrc) | ||||
{ | { | ||||
u_int maxirqs, irq; | u_int maxirqs, irq; | ||||
mtx_assert(&isrc_table_lock, MA_OWNED); | mtx_assert(&isrc_table_lock, MA_OWNED); | ||||
maxirqs = nitems(irq_sources); | maxirqs = nitems(irq_sources); | ||||
if (irq_next_free >= maxirqs) | if (irq_next_free >= maxirqs) | ||||
return (ENOSPC); | return (ENOSPC); | ||||
Show All 9 Lines | isrc_alloc_irq(struct intr_irqsrc *isrc) | ||||
irq_next_free = maxirqs; | irq_next_free = maxirqs; | ||||
return (ENOSPC); | return (ENOSPC); | ||||
found: | found: | ||||
isrc->isrc_irq = irq; | isrc->isrc_irq = irq; | ||||
irq_sources[irq] = isrc; | irq_sources[irq] = isrc; | ||||
intr_irq_set_name(isrc, "irq%u", irq); | |||||
isrc_setup_counters(isrc); | |||||
irq_next_free = irq + 1; | irq_next_free = irq + 1; | ||||
if (irq_next_free >= maxirqs) | if (irq_next_free >= maxirqs) | ||||
irq_next_free = 0; | irq_next_free = 0; | ||||
return (0); | return (0); | ||||
} | } | ||||
#ifdef notyet | |||||
/* | /* | ||||
* Free unique interrupt number (resource handle) from interrupt source. | * Free unique interrupt number (resource handle) from interrupt source. | ||||
*/ | */ | ||||
static int | static inline int | ||||
isrc_free_irq(struct intr_irqsrc *isrc) | isrc_free_irq(struct intr_irqsrc *isrc) | ||||
{ | { | ||||
u_int maxirqs; | |||||
mtx_assert(&isrc_table_lock, MA_NOTOWNED); | mtx_assert(&isrc_table_lock, MA_OWNED); | ||||
maxirqs = nitems(irq_sources); | if (isrc->isrc_irq >= nitems(irq_sources)) | ||||
if (isrc->isrc_irq >= maxirqs) | |||||
return (EINVAL); | return (EINVAL); | ||||
if (irq_sources[isrc->isrc_irq] != isrc) | |||||
mtx_lock(&isrc_table_lock); | |||||
if (irq_sources[isrc->isrc_irq] != isrc) { | |||||
mtx_unlock(&isrc_table_lock); | |||||
return (EINVAL); | return (EINVAL); | ||||
} | |||||
irq_sources[isrc->isrc_irq] = NULL; | irq_sources[isrc->isrc_irq] = NULL; | ||||
isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ | isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ | ||||
mtx_unlock(&isrc_table_lock); | |||||
return (0); | return (0); | ||||
} | } | ||||
#endif | |||||
/* | /* | ||||
* Lookup interrupt source by interrupt number (resource handle). | * Lookup interrupt source by interrupt number (resource handle). | ||||
*/ | */ | ||||
static struct intr_irqsrc * | static inline struct intr_irqsrc * | ||||
isrc_lookup(u_int irq) | isrc_lookup(u_int irq) | ||||
{ | { | ||||
if (irq < nitems(irq_sources)) | if (irq < nitems(irq_sources)) | ||||
return (irq_sources[irq]); | return (irq_sources[irq]); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
/* | /* | ||||
* Lookup interrupt source by namespace description. | * Initialize interrupt source and register it into global interrupt table. | ||||
*/ | */ | ||||
static struct intr_irqsrc * | int | ||||
isrc_namespace_lookup(device_t dev, uint16_t type, uint16_t num) | intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags, | ||||
const char *fmt, ...) | |||||
{ | { | ||||
u_int irq; | int error; | ||||
struct intr_irqsrc *isrc; | va_list ap; | ||||
mtx_assert(&isrc_table_lock, MA_OWNED); | bzero(isrc, sizeof(struct intr_irqsrc)); | ||||
isrc->isrc_dev = dev; | |||||
isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ | |||||
isrc->isrc_flags = flags; | |||||
for (irq = 0; irq < nitems(irq_sources); irq++) { | va_start(ap, fmt); | ||||
isrc = irq_sources[irq]; | vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); | ||||
if (isrc != NULL && isrc->isrc_dev == dev && | va_end(ap); | ||||
isrc->isrc_nspc_type == type && isrc->isrc_nspc_num == num) | |||||
return (isrc); | mtx_lock(&isrc_table_lock); | ||||
error = isrc_alloc_irq(isrc); | |||||
if (error != 0) { | |||||
mtx_unlock(&isrc_table_lock); | |||||
return (error); | |||||
} | } | ||||
return (NULL); | /* | ||||
* Setup interrupt counters, but not for IPI sources. Those are setup | |||||
* later and only for used ones (up to INTR_IPI_COUNT) to not exhaust | |||||
* our counter pool. | |||||
*/ | |||||
if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) | |||||
isrc_setup_counters(isrc); | |||||
mtx_unlock(&isrc_table_lock); | |||||
return (0); | |||||
} | } | ||||
/* | /* | ||||
* Map interrupt source according to namespace into framework. If such mapping | * Deregister interrupt source from global interrupt table. | ||||
* does not exist, create it. Return unique interrupt number (resource handle) | |||||
* associated with mapped interrupt source. | |||||
*/ | */ | ||||
u_int | int | ||||
intr_namespace_map_irq(device_t dev, uint16_t type, uint16_t num) | intr_isrc_deregister(struct intr_irqsrc *isrc) | ||||
{ | { | ||||
struct intr_irqsrc *isrc, *new_isrc; | |||||
int error; | int error; | ||||
new_isrc = intr_isrc_alloc(INTR_ISRCT_NAMESPACE, 0); | |||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
isrc = isrc_namespace_lookup(dev, type, num); | if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) | ||||
if (isrc != NULL) { | isrc_release_counters(isrc); | ||||
error = isrc_free_irq(isrc); | |||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
intr_isrc_free(new_isrc); | return (error); | ||||
return (isrc->isrc_irq); /* already mapped */ | |||||
} | } | ||||
error = isrc_alloc_irq_locked(new_isrc); | static struct intr_dev_data * | ||||
if (error != 0) { | intr_ddata_alloc(u_int extsize) | ||||
{ | |||||
struct intr_dev_data *ddata; | |||||
ddata = malloc(sizeof(*ddata) + extsize, M_INTRNG, M_WAITOK | M_ZERO); | |||||
mtx_lock(&isrc_table_lock); | |||||
if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) { | |||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
intr_isrc_free(new_isrc); | free(ddata, M_INTRNG); | ||||
return (IRQ_INVALID); /* no space left */ | return (NULL); | ||||
} | } | ||||
intr_ddata_tab[intr_ddata_first_unused] = ddata; | |||||
new_isrc->isrc_dev = dev; | ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++; | ||||
new_isrc->isrc_nspc_type = type; | |||||
new_isrc->isrc_nspc_num = num; | |||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (ddata); | |||||
return (new_isrc->isrc_irq); | |||||
} | } | ||||
#ifdef FDT | |||||
/* | |||||
* Lookup interrupt source by FDT description. | |||||
*/ | |||||
static struct intr_irqsrc * | static struct intr_irqsrc * | ||||
isrc_fdt_lookup(intptr_t xref, pcell_t *cells, u_int ncells) | intr_ddata_lookup(u_int irq, struct intr_map_data **datap) | ||||
{ | { | ||||
u_int irq, cellsize; | int error; | ||||
struct intr_irqsrc *isrc; | struct intr_irqsrc *isrc; | ||||
struct intr_dev_data *ddata; | |||||
mtx_assert(&isrc_table_lock, MA_OWNED); | isrc = isrc_lookup(irq); | ||||
if (isrc != NULL) { | |||||
cellsize = ncells * sizeof(*cells); | if (datap != NULL) | ||||
for (irq = 0; irq < nitems(irq_sources); irq++) { | *datap = NULL; | ||||
isrc = irq_sources[irq]; | |||||
if (isrc != NULL && isrc->isrc_type == INTR_ISRCT_FDT && | |||||
isrc->isrc_xref == xref && isrc->isrc_ncells == ncells && | |||||
memcmp(isrc->isrc_cells, cells, cellsize) == 0) | |||||
return (isrc); | return (isrc); | ||||
} | } | ||||
if (irq < IRQ_DDATA_BASE) | |||||
return (NULL); | return (NULL); | ||||
irq -= IRQ_DDATA_BASE; | |||||
if (irq >= nitems(intr_ddata_tab)) | |||||
return (NULL); | |||||
ddata = intr_ddata_tab[irq]; | |||||
if (ddata->idd_isrc == NULL) { | |||||
error = intr_map_irq(ddata->idd_dev, ddata->idd_xref, | |||||
&ddata->idd_data, &irq); | |||||
if (error != 0) | |||||
return (NULL); | |||||
ddata->idd_isrc = isrc_lookup(irq); | |||||
} | } | ||||
if (datap != NULL) | |||||
*datap = &ddata->idd_data; | |||||
return (ddata->idd_isrc); | |||||
} | |||||
#ifdef DEV_ACPI | |||||
/* | /* | ||||
* Map interrupt source according to FDT data into framework. If such mapping | * Map interrupt source according to ACPI info into framework. If such mapping | ||||
* does not exist, create it. Return unique interrupt number (resource handle) | * does not exist, create it. Return unique interrupt number (resource handle) | ||||
* associated with mapped interrupt source. | * associated with mapped interrupt source. | ||||
*/ | */ | ||||
u_int | u_int | ||||
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells) | intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol, | ||||
enum intr_trigger trig) | |||||
{ | { | ||||
struct intr_irqsrc *isrc, *new_isrc; | struct intr_dev_data *ddata; | ||||
u_int cellsize; | |||||
intptr_t xref; | |||||
int error; | |||||
xref = (intptr_t)node; /* It's so simple for now. */ | ddata = intr_ddata_alloc(0); | ||||
if (ddata == NULL) | |||||
return (0xFFFFFFFF); /* no space left */ | |||||
andrew: This assumes a 32 bit `u_int` | |||||
Not Done Inline ActionsYes, u_int is now 32 bit in all fbsd supported platforms. But I see your point and it's correct. However, this function will be removed in next step. skra: Yes, u_int is now 32 bit in all fbsd supported platforms. But I see your point and it's correct. | |||||
cellsize = ncells * sizeof(*cells); | ddata->idd_dev = dev; | ||||
new_isrc = intr_isrc_alloc(INTR_ISRCT_FDT, cellsize); | ddata->idd_data.type = INTR_MAP_DATA_ACPI; | ||||
ddata->idd_data.acpi.irq = irq; | |||||
mtx_lock(&isrc_table_lock); | ddata->idd_data.acpi.pol = pol; | ||||
isrc = isrc_fdt_lookup(xref, cells, ncells); | ddata->idd_data.acpi.trig = trig; | ||||
if (isrc != NULL) { | return (ddata->idd_irq); | ||||
mtx_unlock(&isrc_table_lock); | |||||
intr_isrc_free(new_isrc); | |||||
return (isrc->isrc_irq); /* already mapped */ | |||||
} | } | ||||
error = isrc_alloc_irq_locked(new_isrc); | |||||
if (error != 0) { | |||||
mtx_unlock(&isrc_table_lock); | |||||
intr_isrc_free(new_isrc); | |||||
return (IRQ_INVALID); /* no space left */ | |||||
} | |||||
new_isrc->isrc_xref = xref; | |||||
new_isrc->isrc_ncells = ncells; | |||||
memcpy(new_isrc->isrc_cells, cells, cellsize); | |||||
mtx_unlock(&isrc_table_lock); | |||||
return (new_isrc->isrc_irq); | |||||
} | |||||
#endif | #endif | ||||
#ifdef FDT | |||||
/* | /* | ||||
* Register interrupt source into interrupt controller. | * Map interrupt source according to FDT data into framework. If such mapping | ||||
* does not exist, create it. Return unique interrupt number (resource handle) | |||||
* associated with mapped interrupt source. | |||||
*/ | */ | ||||
static int | u_int | ||||
isrc_register(struct intr_irqsrc *isrc) | intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells) | ||||
{ | { | ||||
struct intr_pic *pic; | struct intr_dev_data *ddata; | ||||
boolean_t is_percpu; | u_int cellsize; | ||||
int error; | |||||
if (isrc->isrc_flags & INTR_ISRCF_REGISTERED) | cellsize = ncells * sizeof(*cells); | ||||
return (0); | ddata = intr_ddata_alloc(cellsize); | ||||
if (ddata == NULL) | |||||
return (0xFFFFFFFF); /* no space left */ | |||||
if (isrc->isrc_dev == NULL) { | ddata->idd_xref = (intptr_t)node; | ||||
pic = pic_lookup(NULL, isrc->isrc_xref); | ddata->idd_data.type = INTR_MAP_DATA_FDT; | ||||
if (pic == NULL || pic->pic_dev == NULL) | ddata->idd_data.fdt.ncells = ncells; | ||||
return (ESRCH); | ddata->idd_data.fdt.cells = (pcell_t *)(ddata + 1); | ||||
isrc->isrc_dev = pic->pic_dev; | memcpy(ddata->idd_data.fdt.cells, cells, cellsize); | ||||
return (ddata->idd_irq); | |||||
} | } | ||||
#endif | |||||
error = PIC_REGISTER(isrc->isrc_dev, isrc, &is_percpu); | |||||
if (error != 0) | |||||
return (error); | |||||
mtx_lock(&isrc_table_lock); | |||||
isrc->isrc_flags |= INTR_ISRCF_REGISTERED; | |||||
if (is_percpu) | |||||
isrc->isrc_flags |= INTR_ISRCF_PERCPU; | |||||
isrc_update_name(isrc, NULL); | |||||
mtx_unlock(&isrc_table_lock); | |||||
return (0); | |||||
} | |||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
/* | /* | ||||
* Setup filter into interrupt source. | * Setup filter into interrupt source. | ||||
*/ | */ | ||||
static int | static int | ||||
iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, | iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, | ||||
intr_irq_filter_t *filter, void *arg, void **cookiep) | intr_irq_filter_t *filter, void *arg, void **cookiep) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | #ifdef SMP | ||||
/* | /* | ||||
* In NOCPU case, it's up to PIC to either leave ISRC on same CPU or | * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or | ||||
* re-balance it to another CPU or enable it on more CPUs. However, | * re-balance it to another CPU or enable it on more CPUs. However, | ||||
* PIC is expected to change isrc_cpu appropriately to keep us well | * PIC is expected to change isrc_cpu appropriately to keep us well | ||||
* informed if the call is successfull. | * informed if the call is successfull. | ||||
*/ | */ | ||||
if (irq_assign_cpu) { | if (irq_assign_cpu) { | ||||
error = PIC_BIND(isrc->isrc_dev, isrc); | error = PIC_BIND_INTR(isrc->isrc_dev, isrc); | ||||
if (error) { | if (error) { | ||||
CPU_ZERO(&isrc->isrc_cpu); | CPU_ZERO(&isrc->isrc_cpu); | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (error); | return (error); | ||||
} | } | ||||
} | } | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (0); | return (0); | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | isrc_add_handler(struct intr_irqsrc *isrc, const char *name, | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
* Lookup interrupt controller locked. | * Lookup interrupt controller locked. | ||||
*/ | */ | ||||
static struct intr_pic * | static inline struct intr_pic * | ||||
pic_lookup_locked(device_t dev, intptr_t xref) | pic_lookup_locked(device_t dev, intptr_t xref) | ||||
{ | { | ||||
struct intr_pic *pic; | struct intr_pic *pic; | ||||
mtx_assert(&pic_list_lock, MA_OWNED); | mtx_assert(&pic_list_lock, MA_OWNED); | ||||
SLIST_FOREACH(pic, &pic_list, pic_next) { | SLIST_FOREACH(pic, &pic_list, pic_next) { | ||||
if (pic->pic_xref != xref) | if (pic->pic_xref != xref) | ||||
Show All 10 Lines | |||||
static struct intr_pic * | static struct intr_pic * | ||||
pic_lookup(device_t dev, intptr_t xref) | pic_lookup(device_t dev, intptr_t xref) | ||||
{ | { | ||||
struct intr_pic *pic; | struct intr_pic *pic; | ||||
mtx_lock(&pic_list_lock); | mtx_lock(&pic_list_lock); | ||||
pic = pic_lookup_locked(dev, xref); | pic = pic_lookup_locked(dev, xref); | ||||
mtx_unlock(&pic_list_lock); | mtx_unlock(&pic_list_lock); | ||||
return (pic); | return (pic); | ||||
} | } | ||||
/* | /* | ||||
* Create interrupt controller. | * Create interrupt controller. | ||||
*/ | */ | ||||
static struct intr_pic * | static struct intr_pic * | ||||
pic_create(device_t dev, intptr_t xref) | pic_create(device_t dev, intptr_t xref) | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | debugf("PIC %p registered for %s <xref %x>\n", pic, | ||||
device_get_nameunit(dev), xref); | device_get_nameunit(dev), xref); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Unregister interrupt controller. | * Unregister interrupt controller. | ||||
*/ | */ | ||||
int | int | ||||
intr_pic_unregister(device_t dev, intptr_t xref) | intr_pic_deregister(device_t dev, intptr_t xref) | ||||
{ | { | ||||
panic("%s: not implemented", __func__); | panic("%s: not implemented", __func__); | ||||
} | } | ||||
/* | /* | ||||
* Mark interrupt controller (itself) as a root one. | * Mark interrupt controller (itself) as a root one. | ||||
* | * | ||||
Show All 35 Lines | intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, | ||||
irq_root_arg = arg; | irq_root_arg = arg; | ||||
irq_root_ipicount = ipicount; | irq_root_ipicount = ipicount; | ||||
debugf("irq root set to %s\n", device_get_nameunit(dev)); | debugf("irq root set to %s\n", device_get_nameunit(dev)); | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
intr_irq_add_handler(device_t dev, driver_filter_t filt, driver_intr_t hand, | intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data, | ||||
void *arg, u_int irq, int flags, void **cookiep) | u_int *irqp) | ||||
{ | { | ||||
const char *name; | int error; | ||||
struct intr_irqsrc *isrc; | struct intr_irqsrc *isrc; | ||||
struct intr_pic *pic; | |||||
if (data == NULL) | |||||
return (EINVAL); | |||||
pic = pic_lookup(dev, xref); | |||||
if (pic == NULL || pic->pic_dev == NULL) | |||||
return (ESRCH); | |||||
error = PIC_MAP_INTR(pic->pic_dev, data, &isrc); | |||||
if (error == 0) | |||||
*irqp = isrc->isrc_irq; | |||||
return (error); | |||||
} | |||||
int | |||||
intr_alloc_irq(device_t dev, struct resource *res) | |||||
{ | |||||
struct intr_map_data *data; | |||||
struct intr_irqsrc *isrc; | |||||
KASSERT(rman_get_start(res) == rman_get_end(res), | |||||
("%s: more interrupts in resource", __func__)); | |||||
isrc = intr_ddata_lookup(rman_get_start(res), &data); | |||||
if (isrc == NULL) | |||||
return (EINVAL); | |||||
return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data)); | |||||
} | |||||
int | |||||
intr_release_irq(device_t dev, struct resource *res) | |||||
{ | |||||
struct intr_map_data *data; | |||||
struct intr_irqsrc *isrc; | |||||
KASSERT(rman_get_start(res) == rman_get_end(res), | |||||
("%s: more interrupts in resource", __func__)); | |||||
isrc = intr_ddata_lookup(rman_get_start(res), &data); | |||||
if (isrc == NULL) | |||||
return (EINVAL); | |||||
return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data)); | |||||
} | |||||
int | |||||
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt, | |||||
driver_intr_t hand, void *arg, int flags, void **cookiep) | |||||
{ | |||||
int error; | int error; | ||||
struct intr_map_data *data; | |||||
struct intr_irqsrc *isrc; | |||||
const char *name; | |||||
KASSERT(rman_get_start(res) == rman_get_end(res), | |||||
("%s: more interrupts in resource", __func__)); | |||||
isrc = intr_ddata_lookup(rman_get_start(res), &data); | |||||
if (isrc == NULL) | |||||
return (EINVAL); | |||||
name = device_get_nameunit(dev); | name = device_get_nameunit(dev); | ||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
/* | /* | ||||
* Standard handling is done thru MI interrupt framework. However, | * Standard handling is done thru MI interrupt framework. However, | ||||
* some interrupts could request solely own special handling. This | * some interrupts could request solely own special handling. This | ||||
* non standard handling can be used for interrupt controllers without | * non standard handling can be used for interrupt controllers without | ||||
* handler (filter only), so in case that interrupt controllers are | * handler (filter only), so in case that interrupt controllers are | ||||
* chained, MI interrupt framework is called only in leaf controller. | * chained, MI interrupt framework is called only in leaf controller. | ||||
* | * | ||||
* Note that root interrupt controller routine is served as well, | * Note that root interrupt controller routine is served as well, | ||||
* however in intr_irq_handler(), i.e. main system dispatch routine. | * however in intr_irq_handler(), i.e. main system dispatch routine. | ||||
*/ | */ | ||||
if (flags & INTR_SOLO && hand != NULL) { | if (flags & INTR_SOLO && hand != NULL) { | ||||
debugf("irq %u cannot solo on %s\n", irq, name); | debugf("irq %u cannot solo on %s\n", irq, name); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
#endif | |||||
isrc = isrc_lookup(irq); | |||||
if (isrc == NULL) { | |||||
debugf("irq %u without source on %s\n", irq, name); | |||||
return (EINVAL); | |||||
} | |||||
error = isrc_register(isrc); | |||||
if (error != 0) { | |||||
debugf("irq %u map error %d on %s\n", irq, error, name); | |||||
return (error); | |||||
} | |||||
#ifdef INTR_SOLO | |||||
if (flags & INTR_SOLO) { | if (flags & INTR_SOLO) { | ||||
error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt, | error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt, | ||||
arg, cookiep); | arg, cookiep); | ||||
debugf("irq %u setup filter error %d on %s\n", irq, error, | debugf("irq %u setup filter error %d on %s\n", irq, error, | ||||
name); | name); | ||||
} else | } else | ||||
#endif | #endif | ||||
{ | { | ||||
error = isrc_add_handler(isrc, name, filt, hand, arg, flags, | error = isrc_add_handler(isrc, name, filt, hand, arg, flags, | ||||
cookiep); | cookiep); | ||||
debugf("irq %u add handler error %d on %s\n", irq, error, name); | debugf("irq %u add handler error %d on %s\n", irq, error, name); | ||||
} | } | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data); | |||||
if (error == 0) { | |||||
isrc->isrc_handlers++; | isrc->isrc_handlers++; | ||||
if (isrc->isrc_handlers == 1) { | if (isrc->isrc_handlers == 1) | ||||
PIC_ENABLE_INTR(isrc->isrc_dev, isrc); | PIC_ENABLE_INTR(isrc->isrc_dev, isrc); | ||||
PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); | |||||
} | } | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (0); | if (error != 0) | ||||
intr_event_remove_handler(*cookiep); | |||||
return (error); | |||||
} | } | ||||
int | int | ||||
intr_irq_remove_handler(device_t dev, u_int irq, void *cookie) | intr_teardown_irq(device_t dev, struct resource *res, void *cookie) | ||||
{ | { | ||||
struct intr_irqsrc *isrc; | |||||
int error; | int error; | ||||
struct intr_map_data *data; | |||||
struct intr_irqsrc *isrc; | |||||
isrc = isrc_lookup(irq); | KASSERT(rman_get_start(res) == rman_get_end(res), | ||||
("%s: more interrupts in resource", __func__)); | |||||
isrc = intr_ddata_lookup(rman_get_start(res), &data); | |||||
if (isrc == NULL || isrc->isrc_handlers == 0) | if (isrc == NULL || isrc->isrc_handlers == 0) | ||||
return (EINVAL); | return (EINVAL); | ||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
if (isrc->isrc_filter != NULL) { | if (isrc->isrc_filter != NULL) { | ||||
if (isrc != cookie) | if (isrc != cookie) | ||||
return (EINVAL); | return (EINVAL); | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
isrc->isrc_filter = NULL; | isrc->isrc_filter = NULL; | ||||
isrc->isrc_arg = NULL; | isrc->isrc_arg = NULL; | ||||
isrc->isrc_handlers = 0; | isrc->isrc_handlers = 0; | ||||
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); | |||||
PIC_DISABLE_INTR(isrc->isrc_dev, isrc); | PIC_DISABLE_INTR(isrc->isrc_dev, isrc); | ||||
PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); | |||||
isrc_update_name(isrc, NULL); | isrc_update_name(isrc, NULL); | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (0); | return (0); | ||||
} | } | ||||
#endif | #endif | ||||
if (isrc != intr_handler_source(cookie)) | if (isrc != intr_handler_source(cookie)) | ||||
return (EINVAL); | return (EINVAL); | ||||
error = intr_event_remove_handler(cookie); | error = intr_event_remove_handler(cookie); | ||||
if (error == 0) { | if (error == 0) { | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
isrc->isrc_handlers--; | isrc->isrc_handlers--; | ||||
if (isrc->isrc_handlers == 0) { | if (isrc->isrc_handlers == 0) | ||||
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); | |||||
PIC_DISABLE_INTR(isrc->isrc_dev, isrc); | PIC_DISABLE_INTR(isrc->isrc_dev, isrc); | ||||
} | PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); | ||||
intrcnt_updatename(isrc); | intrcnt_updatename(isrc); | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
int | int | ||||
intr_irq_config(u_int irq, enum intr_trigger trig, enum intr_polarity pol) | intr_describe_irq(device_t dev, struct resource *res, void *cookie, | ||||
const char *descr) | |||||
{ | { | ||||
int error; | |||||
struct intr_irqsrc *isrc; | struct intr_irqsrc *isrc; | ||||
isrc = isrc_lookup(irq); | KASSERT(rman_get_start(res) == rman_get_end(res), | ||||
if (isrc == NULL) | ("%s: more interrupts in resource", __func__)); | ||||
return (EINVAL); | |||||
if (isrc->isrc_handlers != 0) | isrc = intr_ddata_lookup(rman_get_start(res), NULL); | ||||
return (EBUSY); /* interrrupt is enabled (active) */ | |||||
/* | |||||
* Once an interrupt is enabled, we do not change its configuration. | |||||
* A controller PIC_ENABLE_INTR() method is called when an interrupt | |||||
* is going to be enabled. In this method, a controller should setup | |||||
* the interrupt according to saved configuration parameters. | |||||
*/ | |||||
isrc->isrc_trig = trig; | |||||
isrc->isrc_pol = pol; | |||||
return (0); | |||||
} | |||||
int | |||||
intr_irq_describe(u_int irq, void *cookie, const char *descr) | |||||
{ | |||||
struct intr_irqsrc *isrc; | |||||
int error; | |||||
isrc = isrc_lookup(irq); | |||||
if (isrc == NULL || isrc->isrc_handlers == 0) | if (isrc == NULL || isrc->isrc_handlers == 0) | ||||
return (EINVAL); | return (EINVAL); | ||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
if (isrc->isrc_filter != NULL) { | if (isrc->isrc_filter != NULL) { | ||||
if (isrc != cookie) | if (isrc != cookie) | ||||
return (EINVAL); | return (EINVAL); | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
isrc_update_name(isrc, descr); | isrc_update_name(isrc, descr); | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
return (0); | return (0); | ||||
} | } | ||||
#endif | #endif | ||||
error = intr_event_describe_handler(isrc->isrc_event, cookie, descr); | error = intr_event_describe_handler(isrc->isrc_event, cookie, descr); | ||||
if (error == 0) { | if (error == 0) { | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
intrcnt_updatename(isrc); | intrcnt_updatename(isrc); | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
#ifdef SMP | #ifdef SMP | ||||
int | int | ||||
intr_irq_bind(u_int irq, int cpu) | intr_bind_irq(device_t dev, struct resource *res, int cpu) | ||||
{ | { | ||||
struct intr_irqsrc *isrc; | struct intr_irqsrc *isrc; | ||||
isrc = isrc_lookup(irq); | KASSERT(rman_get_start(res) == rman_get_end(res), | ||||
("%s: more interrupts in resource", __func__)); | |||||
isrc = intr_ddata_lookup(rman_get_start(res), NULL); | |||||
if (isrc == NULL || isrc->isrc_handlers == 0) | if (isrc == NULL || isrc->isrc_handlers == 0) | ||||
return (EINVAL); | return (EINVAL); | ||||
#ifdef INTR_SOLO | #ifdef INTR_SOLO | ||||
if (isrc->isrc_filter != NULL) | if (isrc->isrc_filter != NULL) | ||||
return (intr_isrc_assign_cpu(isrc, cpu)); | return (intr_isrc_assign_cpu(isrc, cpu)); | ||||
#endif | #endif | ||||
return (intr_event_bind(isrc->isrc_event, cpu)); | return (intr_event_bind(isrc->isrc_event, cpu)); | ||||
} | } | ||||
Show All 30 Lines | intr_irq_shuffle(void *arg __unused) | ||||
if (mp_ncpus == 1) | if (mp_ncpus == 1) | ||||
return; | return; | ||||
mtx_lock(&isrc_table_lock); | mtx_lock(&isrc_table_lock); | ||||
irq_assign_cpu = TRUE; | irq_assign_cpu = TRUE; | ||||
for (i = 0; i < NIRQ; i++) { | for (i = 0; i < NIRQ; i++) { | ||||
isrc = irq_sources[i]; | isrc = irq_sources[i]; | ||||
if (isrc == NULL || isrc->isrc_handlers == 0 || | if (isrc == NULL || isrc->isrc_handlers == 0 || | ||||
isrc->isrc_flags & INTR_ISRCF_PERCPU) | isrc->isrc_flags & INTR_ISRCF_PPI) | ||||
continue; | continue; | ||||
if (isrc->isrc_event != NULL && | if (isrc->isrc_event != NULL && | ||||
isrc->isrc_flags & INTR_ISRCF_BOUND && | isrc->isrc_flags & INTR_ISRCF_BOUND && | ||||
isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1) | isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1) | ||||
panic("%s: CPU inconsistency", __func__); | panic("%s: CPU inconsistency", __func__); | ||||
if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0) | if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0) | ||||
CPU_ZERO(&isrc->isrc_cpu); /* start again */ | CPU_ZERO(&isrc->isrc_cpu); /* start again */ | ||||
/* | /* | ||||
* We are in wicked position here if the following call fails | * We are in wicked position here if the following call fails | ||||
* for bound ISRC. The best thing we can do is to clear | * for bound ISRC. The best thing we can do is to clear | ||||
* isrc_cpu so inconsistency with ie_cpu will be detectable. | * isrc_cpu so inconsistency with ie_cpu will be detectable. | ||||
*/ | */ | ||||
if (PIC_BIND(isrc->isrc_dev, isrc) != 0) | if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0) | ||||
CPU_ZERO(&isrc->isrc_cpu); | CPU_ZERO(&isrc->isrc_cpu); | ||||
} | } | ||||
mtx_unlock(&isrc_table_lock); | mtx_unlock(&isrc_table_lock); | ||||
} | } | ||||
/* Re-distribute interrupt sources across CPUs once all CPUs are up. */
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
#else | #else | ||||
u_int | u_int | ||||
Show All 28 Lines | intr_pic_init_secondary(void) | ||||
//mtx_unlock(&isrc_table_lock); | //mtx_unlock(&isrc_table_lock); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef DDB
/*
 * DDB "show irqs": dump every registered interrupt source with its
 * name, CPU affinity mask, binding state, and dispatch count, followed
 * by the total count.
 *
 * The accumulator and count are u_long to match isrc_count[] entries;
 * a u_int accumulator printed with %u would truncate on LP64 targets.
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i;
	u_long irqsum, num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		/* A source may have no counter array attached. */
		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %lu\n", irqsum);
}
#endif
Reviewer note: the `irq total` accumulator is declared `u_int` and printed with `%u`, while the per-source counters (`isrc_count[0]`) are `u_long` — this assumes a 32-bit `u_int` and can truncate the total on LP64 platforms. Widen the accumulator to `u_long` and print it with `%lu`.