Index: projects/powernv/powerpc/powerpc/intr_machdep.c =================================================================== --- projects/powernv/powerpc/powerpc/intr_machdep.c (revision 304725) +++ projects/powernv/powerpc/powerpc/intr_machdep.c (revision 304726) @@ -1,643 +1,643 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 * * $FreeBSD$ */ #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #define MAX_STRAY_LOG 5 static MALLOC_DEFINE(M_INTR, "intr", "interrupt handler data"); struct powerpc_intr { struct intr_event *event; long *cntp; u_int irq; device_t pic; u_int intline; u_int vector; u_int cntindex; cpuset_t cpu; enum intr_trigger trig; enum intr_polarity pol; int fwcode; int ipi; }; struct pic { device_t dev; uint32_t node; u_int irqs; u_int ipis; int base; }; static u_int intrcnt_index = 0; static struct mtx intr_table_lock; static struct powerpc_intr *powerpc_intrs[INTR_VECTORS]; static struct pic piclist[MAX_PICS]; static u_int nvectors; /* Allocated vectors */ static u_int npics; /* PICs registered */ #ifdef DEV_ISA static u_int nirqs = 16; /* Allocated IRQS (ISA pre-allocated). */ #else static u_int nirqs = 0; /* Allocated IRQs. */ #endif static u_int stray_count; u_long intrcnt[INTR_VECTORS]; char intrnames[INTR_VECTORS * MAXCOMLEN]; size_t sintrcnt = sizeof(intrcnt); size_t sintrnames = sizeof(intrnames); device_t root_pic; #ifdef SMP static void *ipi_cookie; #endif static void intr_init(void *dummy __unused) { mtx_init(&intr_table_lock, "intr sources lock", NULL, MTX_DEF); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); #ifdef SMP static void smp_intr_init(void *dummy __unused) { struct powerpc_intr *i; int vector; for (vector = 0; vector < nvectors; vector++) { i = powerpc_intrs[vector]; if (i != NULL && i->event != NULL && i->pic == root_pic) PIC_BIND(i->pic, i->intline, i->cpu); } } SYSINIT(smp_intr_init, SI_SUB_SMP, SI_ORDER_ANY, smp_intr_init, NULL); #endif static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } void intrcnt_add(const char *name, u_long **countp) { int idx; idx = atomic_fetchadd_int(&intrcnt_index, 1); *countp = &intrcnt[idx]; intrcnt_setname(name, idx); } static struct powerpc_intr * intr_lookup(u_int irq) { char intrname[16]; struct powerpc_intr *i, *iscan; int vector; mtx_lock(&intr_table_lock); for (vector = 0; vector < nvectors; vector++) { i = powerpc_intrs[vector]; if (i != NULL && i->irq == irq) { mtx_unlock(&intr_table_lock); return (i); } } i = malloc(sizeof(*i), M_INTR, M_NOWAIT); if (i == NULL) { mtx_unlock(&intr_table_lock); return (NULL); } i->event = NULL; i->cntp = NULL; i->trig = INTR_TRIGGER_CONFORM; i->pol = INTR_POLARITY_CONFORM; i->irq = irq; i->pic = NULL; i->vector = -1; i->fwcode = 0; i->ipi = 0; #ifdef SMP i->cpu = all_cpus; #else CPU_SETOF(0, &i->cpu); #endif for (vector = 0; vector < INTR_VECTORS && vector <= nvectors; vector++) { iscan = powerpc_intrs[vector]; if (iscan != NULL && iscan->irq == irq) break; if (iscan == NULL && i->vector == -1) i->vector = vector; iscan = NULL; } if (iscan == NULL && i->vector != -1) { powerpc_intrs[i->vector] = i; i->cntindex = atomic_fetchadd_int(&intrcnt_index, 1); i->cntp = &intrcnt[i->cntindex]; sprintf(intrname, "irq%u:", i->irq); intrcnt_setname(intrname, i->cntindex); nvectors++; } mtx_unlock(&intr_table_lock); if (iscan != NULL || i->vector == -1) { free(i, M_INTR); i = iscan; } return (i); } static int powerpc_map_irq(struct powerpc_intr *i) { struct pic *p; u_int cnt; int idx; for (idx = 0; idx < npics; idx++) { p = 
&piclist[idx];
		cnt = p->irqs + p->ipis;
		if (i->irq >= p->base && i->irq < p->base + cnt)
			break;
	}
	if (idx == npics)
		return (EINVAL);

	i->intline = i->irq - p->base;
	i->pic = p->dev;

	/* Try a best guess if that failed */
	if (i->pic == NULL)
		i->pic = root_pic;

	return (0);
}

static void
powerpc_intr_eoi(void *arg)
{
	struct powerpc_intr *i = arg;

	PIC_EOI(i->pic, i->intline);
}

static void
powerpc_intr_pre_ithread(void *arg)
{
	struct powerpc_intr *i = arg;

	PIC_MASK(i->pic, i->intline);
	PIC_EOI(i->pic, i->intline);
}

static void
powerpc_intr_post_ithread(void *arg)
{
	struct powerpc_intr *i = arg;

	PIC_UNMASK(i->pic, i->intline);
}

static int
powerpc_assign_intr_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct powerpc_intr *i = arg;

	if (cpu == NOCPU)
		i->cpu = all_cpus;
	else
		CPU_SETOF(cpu, &i->cpu);

	if (!cold && i->pic != NULL && i->pic == root_pic)
		PIC_BIND(i->pic, i->intline, i->cpu);

	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

u_int
powerpc_register_pic(device_t dev, uint32_t node, u_int irqs, u_int ipis,
    u_int atpic)
{
	struct pic *p;
	u_int irq;
	int idx;

	mtx_lock(&intr_table_lock);

	/* XXX see powerpc_get_irq(). */
	for (idx = 0; idx < npics; idx++) {
		p = &piclist[idx];
		if (p->node != node)
			continue;
		if (node != 0 || p->dev == dev)
			break;
	}
	p = &piclist[idx];

	p->dev = dev;
	p->node = node;
	p->irqs = irqs;
	p->ipis = ipis;
	if (idx == npics) {
#ifdef DEV_ISA
		p->base = (atpic) ? 0 : nirqs;
#else
		p->base = nirqs;
#endif
		irq = p->base + irqs + ipis;
		nirqs = MAX(nirqs, irq);
		npics++;
	}
	KASSERT(npics < MAX_PICS, ("Number of PICs exceeds maximum (%d)",
	    MAX_PICS));

	mtx_unlock(&intr_table_lock);

	return (p->base);
}

u_int
powerpc_get_irq(uint32_t node, u_int pin)
{
	int idx;

	if (node == 0)
		return (pin);

	mtx_lock(&intr_table_lock);
	for (idx = 0; idx < npics; idx++) {
		if (piclist[idx].node == node) {
			mtx_unlock(&intr_table_lock);
			return (piclist[idx].base + pin);
		}
	}

	/*
	 * XXX we should never encounter an unregistered PIC, but that
	 * can only be done when we properly support bus enumeration
	 * using multiple passes. Until then, fake an entry and give it
	 * some adhoc maximum number of IRQs and IPIs.
	 */
	piclist[idx].dev = NULL;
	piclist[idx].node = node;
	piclist[idx].irqs = 124;
	piclist[idx].ipis = 4;
	piclist[idx].base = nirqs;
-	nirqs += (1 << 24);
+	nirqs += (1 << 25);
	npics++;
	KASSERT(npics < MAX_PICS, ("Number of PICs exceeds maximum (%d)",
	    MAX_PICS));

	mtx_unlock(&intr_table_lock);

	return (piclist[idx].base + pin);
}

int
powerpc_enable_intr(void)
{
	struct powerpc_intr *i;
	int error, vector;
#ifdef SMP
	int n;
#endif

	if (npics == 0)
		panic("no PIC detected\n");

	if (root_pic == NULL)
		root_pic = piclist[0].dev;

#ifdef SMP
	/* Install an IPI handler. */
	if (mp_ncpus > 1) {
		for (n = 0; n < npics; n++) {
			if (piclist[n].dev != root_pic)
				continue;

			KASSERT(piclist[n].ipis != 0,
			    ("%s: SMP root PIC does not supply any IPIs",
			    __func__));
			error = powerpc_setup_intr("IPI",
			    MAP_IRQ(piclist[n].node, piclist[n].irqs),
			    powerpc_ipi_handler, NULL, NULL,
			    INTR_TYPE_MISC | INTR_EXCL, &ipi_cookie);
			if (error) {
				printf("unable to setup IPI handler\n");
				return (error);
			}

			/*
			 * Some subterfuge: disable late EOI and mark this
			 * as an IPI to the dispatch layer.
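			 *
			 * Annotation: MAP_IRQ(node, pin) wraps
			 * powerpc_get_irq(), so the IPI is registered one
			 * slot past the root PIC's real interrupt sources.
			 * The (1 << 24) -> (1 << 25) hunk above widens the
			 * ad-hoc range reserved for a PIC that
			 * powerpc_get_irq() has to fake before it attaches:
			 * XICS claims MAX_XICP_IRQS = (1 << 24) sources plus
			 * this IPI, which no longer fits a (1 << 24)
			 * placeholder and could overlap the next PIC's range.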
			 */
			i = intr_lookup(MAP_IRQ(piclist[n].node,
			    piclist[n].irqs));
			i->event->ie_post_filter = NULL;
			i->ipi = 1;
		}
	}
#endif

	for (vector = 0; vector < nvectors; vector++) {
		i = powerpc_intrs[vector];
		if (i == NULL)
			continue;

		error = powerpc_map_irq(i);
		if (error)
			continue;

		if (i->trig == -1)
			PIC_TRANSLATE_CODE(i->pic, i->intline, i->fwcode,
			    &i->trig, &i->pol);
		if (i->trig != INTR_TRIGGER_CONFORM ||
		    i->pol != INTR_POLARITY_CONFORM)
			PIC_CONFIG(i->pic, i->intline, i->trig, i->pol);

		if (i->event != NULL)
			PIC_ENABLE(i->pic, i->intline, vector);
	}

	return (0);
}

int
powerpc_setup_intr(const char *name, u_int irq, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
{
	struct powerpc_intr *i;
	int error, enable = 0;

	i = intr_lookup(irq);
	if (i == NULL)
		return (ENOMEM);

	if (i->event == NULL) {
		error = intr_event_create(&i->event, (void *)i, 0, irq,
		    powerpc_intr_pre_ithread, powerpc_intr_post_ithread,
		    powerpc_intr_eoi, powerpc_assign_intr_cpu, "irq%u:", irq);
		if (error)
			return (error);
		enable = 1;
	}

	error = intr_event_add_handler(i->event, name, filter, handler, arg,
	    intr_priority(flags), flags, cookiep);

	mtx_lock(&intr_table_lock);
	intrcnt_setname(i->event->ie_fullname, i->cntindex);
	mtx_unlock(&intr_table_lock);

	if (!cold) {
		error = powerpc_map_irq(i);

		if (!error) {
			if (i->trig == -1)
				PIC_TRANSLATE_CODE(i->pic, i->intline,
				    i->fwcode, &i->trig, &i->pol);
			if (i->trig != INTR_TRIGGER_CONFORM ||
			    i->pol != INTR_POLARITY_CONFORM)
				PIC_CONFIG(i->pic, i->intline, i->trig,
				    i->pol);

			if (i->pic == root_pic)
				PIC_BIND(i->pic, i->intline, i->cpu);

			if (enable)
				PIC_ENABLE(i->pic, i->intline, i->vector);
		}
	}
	return (error);
}

int
powerpc_teardown_intr(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef SMP
int
powerpc_bind_intr(u_int irq, u_char cpu)
{
	struct powerpc_intr *i;

	i = intr_lookup(irq);
	if (i == NULL)
		return (ENOMEM);

	return (intr_event_bind(i->event, cpu));
}
#endif

int
powerpc_fw_config_intr(int irq, int sense_code)
{
	struct powerpc_intr *i;

	i = intr_lookup(irq);
	if (i == NULL)
		return (ENOMEM);

	i->trig = -1;
	i->pol = INTR_POLARITY_CONFORM;
	i->fwcode = sense_code;

	if (!cold && i->pic != NULL) {
		PIC_TRANSLATE_CODE(i->pic, i->intline, i->fwcode, &i->trig,
		    &i->pol);
		PIC_CONFIG(i->pic, i->intline, i->trig, i->pol);
	}

	return (0);
}

int
powerpc_config_intr(int irq, enum intr_trigger trig, enum intr_polarity pol)
{
	struct powerpc_intr *i;

	i = intr_lookup(irq);
	if (i == NULL)
		return (ENOMEM);

	i->trig = trig;
	i->pol = pol;

	if (!cold && i->pic != NULL)
		PIC_CONFIG(i->pic, i->intline, trig, pol);

	return (0);
}

void
powerpc_dispatch_intr(u_int vector, struct trapframe *tf)
{
	struct powerpc_intr *i;
	struct intr_event *ie;

	i = powerpc_intrs[vector];
	if (i == NULL)
		goto stray;

	(*i->cntp)++;

	ie = i->event;
	KASSERT(ie != NULL, ("%s: interrupt without an event", __func__));

	/*
	 * IPIs are magical and need to be EOI'ed before filtering.
	 * This prevents races in IPI handling.
	 */
	if (i->ipi)
		PIC_EOI(i->pic, i->intline);

	if (intr_event_handle(ie, tf) != 0) {
		goto stray;
	}
	return;

stray:
	stray_count++;
	if (stray_count <= MAX_STRAY_LOG) {
		printf("stray irq %d\n", i ?
		    i->irq : -1);
		if (stray_count >= MAX_STRAY_LOG) {
			printf("got %d stray interrupts, not logging anymore\n",
			    MAX_STRAY_LOG);
		}
	}

	if (i != NULL)
		PIC_MASK(i->pic, i->intline);
}

void
powerpc_intr_mask(u_int irq)
{
	struct powerpc_intr *i;

	i = intr_lookup(irq);
	if (i == NULL || i->pic == NULL)
		return;

	PIC_MASK(i->pic, i->intline);
}

void
powerpc_intr_unmask(u_int irq)
{
	struct powerpc_intr *i;

	i = intr_lookup(irq);
	if (i == NULL || i->pic == NULL)
		return;

	PIC_UNMASK(i->pic, i->intline);
}

Index: projects/powernv/powerpc/powerpc/mp_machdep.c
===================================================================
--- projects/powernv/powerpc/powerpc/mp_machdep.c	(revision 304725)
+++ projects/powernv/powerpc/powerpc/mp_machdep.c	(revision 304726)
@@ -1,363 +1,364 @@
/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include #include #include #include #include #include #include #include #include
#include #include #include #include #include #include #include #include #include
#include #include #include #include #include #include #include #include #include
#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

void
machdep_ap_bootstrap(void)
{

	/* Set PIR */
	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

	while (ap_letgo == 0)
-		;
+		__asm __volatile("or 27,27,27");
+	__asm __volatile("or 6,6,6");

	/* Initialize DEC and TB, sync with the BSP values */
	platform_smp_timebase_sync(ap_timebase, 1);
	decr_ap_init();

	/* Give platform code a chance to do anything necessary */
	platform_smp_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Start per-CPU event timers.
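	 *
	 * On the spin loop above: the "or Rx,Rx,Rx" forms are PowerPC
	 * program-priority hints. "or 27,27,27" is the "yield" hint, which
	 * drops this hardware thread's SMT priority while it busy-waits on
	 * ap_letgo; "or 6,6,6" is the medium-low priority hint that brings
	 * the thread back up once the BSP releases the APs. Equivalent
	 * macros (hypothetical names, for illustration only):
	 *
	 *	#define	HMT_YIELD	__asm __volatile("or 27,27,27")
	 *	#define	HMT_MEDLOW	__asm __volatile("or 6,6,6")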
*/ cpu_initclocks_ap(); /* Announce ourselves awake, and enter the scheduler */ sched_throw(NULL); } void cpu_mp_setmaxid(void) { struct cpuref cpuref; int error; mp_ncpus = 0; mp_maxid = 0; error = platform_smp_first_cpu(&cpuref); while (!error) { mp_ncpus++; mp_maxid = max(cpuref.cr_cpuid, mp_maxid); error = platform_smp_next_cpu(&cpuref); } /* Sanity. */ if (mp_ncpus == 0) mp_ncpus = 1; } int cpu_mp_probe(void) { /* * We're not going to enable SMP if there's only 1 processor. */ return (mp_ncpus > 1); } void cpu_mp_start(void) { struct cpuref bsp, cpu; struct pcpu *pc; int error; error = platform_smp_get_bsp(&bsp); KASSERT(error == 0, ("Don't know BSP")); error = platform_smp_first_cpu(&cpu); while (!error) { if (cpu.cr_cpuid >= MAXCPU) { printf("SMP: cpu%d: skipped -- ID out of range\n", cpu.cr_cpuid); goto next; } if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) { printf("SMP: cpu%d: skipped - duplicate ID\n", cpu.cr_cpuid); goto next; } if (cpu.cr_cpuid != bsp.cr_cpuid) { void *dpcpu; pc = &__pcpu[cpu.cr_cpuid]; dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc)); dpcpu_init(dpcpu, cpu.cr_cpuid); } else { pc = pcpup; pc->pc_cpuid = bsp.cr_cpuid; pc->pc_bsp = 1; } pc->pc_hwref = cpu.cr_hwref; CPU_SET(pc->pc_cpuid, &all_cpus); next: error = platform_smp_next_cpu(&cpu); } } void cpu_mp_announce(void) { struct pcpu *pc; int i; if (!bootverbose) return; CPU_FOREACH(i) { pc = pcpu_find(i); if (pc == NULL) continue; printf("cpu%d: dev=%x", i, (int)pc->pc_hwref); if (pc->pc_bsp) printf(" (BSP)"); printf("\n"); } } static void cpu_mp_unleash(void *dummy) { struct pcpu *pc; int cpus, timeout; if (mp_ncpus <= 1) return; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); cpus = 0; smp_cpus = 0; #ifdef BOOKE tlb1_ap_prep(); #endif STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { cpus++; if (!pc->pc_bsp) { if (bootverbose) printf("Waking up CPU %d (dev=%x)\n", pc->pc_cpuid, (int)pc->pc_hwref); platform_smp_start_cpu(pc); timeout = 2000; /* wait 2sec for the AP */ while (!pc->pc_awake && --timeout > 0) DELAY(1000); } else { PCPU_SET(pir, mfspr(SPR_PIR)); pc->pc_awake = 1; } if (pc->pc_awake) { if (bootverbose) printf("Adding CPU %d, pir=%x, awake=%x\n", pc->pc_cpuid, pc->pc_pir, pc->pc_awake); smp_cpus++; } else CPU_SET(pc->pc_cpuid, &stopped_cpus); } ap_awake = 1; /* Provide our current DEC and TB values for APs */ ap_timebase = mftb() + 10; __asm __volatile("msync; isync"); /* Let APs continue */ atomic_store_rel_int(&ap_letgo, 1); platform_smp_timebase_sync(ap_timebase, 0); while (ap_awake < smp_cpus) ; if (smp_cpus != cpus || cpus != mp_ncpus) { printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n", mp_ncpus, cpus, smp_cpus); } /* Let the APs get into the scheduler */ DELAY(10000); /* XXX Atomic set operation? 
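	 *
	 * If one were wanted, a release store would publish all of the AP
	 * bookkeeping above together with the flag; a hypothetical sketch
	 * using FreeBSD's <machine/atomic.h> (modulo the flag's exact type):
	 *
	 *	atomic_store_rel_int(&smp_started, 1);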
*/ smp_started = 1; } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL); int powerpc_ipi_handler(void *arg) { u_int cpuid; uint32_t ipimask; int msg; CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr()); ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask)); if (ipimask == 0) return (FILTER_STRAY); while ((msg = ffs(ipimask) - 1) != -1) { ipimask &= ~(1u << msg); ipi_msg_cnt[msg]++; switch (msg) { case IPI_AST: CTR1(KTR_SMP, "%s: IPI_AST", __func__); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_RENDEZVOUS: CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__); smp_rendezvous_action(); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add such case in the switch. */ CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)", __func__); cpuid = PCPU_GET(cpuid); savectx(&stoppcbs[cpuid]); savectx(PCPU_GET(curpcb)); CPU_SET_ATOMIC(cpuid, &stopped_cpus); while (!CPU_ISSET(cpuid, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpuid, &stopped_cpus); CPU_CLR_ATOMIC(cpuid, &started_cpus); CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; } } return (FILTER_HANDLED); } static void ipi_send(struct pcpu *pc, int ipi) { CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__, pc, pc->pc_cpuid, ipi); atomic_set_32(&pc->pc_ipimask, (1 << ipi)); powerpc_sync(); PIC_IPI(root_pic, pc->pc_cpuid); CTR1(KTR_SMP, "%s: sent", __func__); } /* Send an IPI to a set of cpus. */ void ipi_selected(cpuset_t cpus, int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (CPU_ISSET(pc->pc_cpuid, &cpus)) ipi_send(pc, ipi); } } /* Send an IPI to a specific CPU. */ void ipi_cpu(int cpu, u_int ipi) { ipi_send(cpuid_to_pcpu[cpu], ipi); } /* Send an IPI to all CPUs EXCEPT myself. */ void ipi_all_but_self(int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc != pcpup) ipi_send(pc, ipi); } } Index: projects/powernv/powerpc/pseries/xics.c =================================================================== --- projects/powernv/powerpc/pseries/xics.c (revision 304725) +++ projects/powernv/powerpc/pseries/xics.c (revision 304726) @@ -1,460 +1,463 @@ /*- * Copyright 2011 Nathan Whitehorn * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "phyp-hvcall.h" #include "pic_if.h" #define XICP_PRIORITY 5 /* Random non-zero number */ #define XICP_IPI 2 #define MAX_XICP_IRQS (1<<24) /* 24-bit XIRR field */ static int xicp_probe(device_t); static int xicp_attach(device_t); static int xics_probe(device_t); static int xics_attach(device_t); static void xicp_bind(device_t dev, u_int irq, cpuset_t cpumask); static void xicp_dispatch(device_t, struct trapframe *); static void xicp_enable(device_t, u_int, u_int); static void xicp_eoi(device_t, u_int); static void xicp_ipi(device_t, u_int); static void xicp_mask(device_t, u_int); static void xicp_unmask(device_t, u_int); static device_method_t xicp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xicp_probe), DEVMETHOD(device_attach, xicp_attach), /* PIC interface */ DEVMETHOD(pic_bind, xicp_bind), DEVMETHOD(pic_dispatch, xicp_dispatch), DEVMETHOD(pic_enable, xicp_enable), DEVMETHOD(pic_eoi, xicp_eoi), DEVMETHOD(pic_ipi, xicp_ipi), DEVMETHOD(pic_mask, xicp_mask), DEVMETHOD(pic_unmask, xicp_unmask), DEVMETHOD_END }; static device_method_t xics_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xics_probe), DEVMETHOD(device_attach, xics_attach), DEVMETHOD_END }; struct xicp_softc { struct mtx sc_mtx; struct resource *mem[MAXCPU]; int cpu_range[2]; int ibm_int_on; int ibm_int_off; int ibm_get_xive; int ibm_set_xive; /* XXX: inefficient -- hash table? tree? */ struct { int irq; int vector; int cpu; } intvecs[256]; int nintvecs; }; static driver_t xicp_driver = { "xicp", xicp_methods, sizeof(struct xicp_softc) }; static driver_t xics_driver = { "xics", xics_methods, 0 }; static devclass_t xicp_devclass; static devclass_t xics_devclass; EARLY_DRIVER_MODULE(xicp, ofwbus, xicp_driver, xicp_devclass, 0, 0, BUS_PASS_INTERRUPT-1); EARLY_DRIVER_MODULE(xics, ofwbus, xics_driver, xics_devclass, 0, 0, BUS_PASS_INTERRUPT); static struct resource * xicp_mem_for_cpu(int cpu) { device_t dev; struct xicp_softc *sc; int i; for (i = 0; (dev = devclass_get_device(xicp_devclass, i)) != NULL; i++){ sc = device_get_softc(dev); if (cpu >= sc->cpu_range[0] && cpu < sc->cpu_range[1]) return (sc->mem[cpu - sc->cpu_range[0]]); } return (NULL); } static int xicp_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "ibm,ppc-xicp")) return (ENXIO); device_set_desc(dev, "External Interrupt Presentation Controller"); return (BUS_PROBE_GENERIC); } static int xics_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "ibm,ppc-xics")) return (ENXIO); device_set_desc(dev, "External Interrupt Source Controller"); return (BUS_PROBE_GENERIC); } static int xicp_attach(device_t dev) { struct xicp_softc *sc = device_get_softc(dev); phandle_t phandle = ofw_bus_get_node(dev); int i = 0; if (rtas_exists()) { sc->ibm_int_on = rtas_token_lookup("ibm,int-on"); sc->ibm_int_off = rtas_token_lookup("ibm,int-off"); sc->ibm_set_xive = rtas_token_lookup("ibm,set-xive"); sc->ibm_get_xive = rtas_token_lookup("ibm,get-xive"); } else if (opal_check() == 0) { /* No init needed */ } else { device_printf(dev, "Cannot attach without RTAS or OPAL\n"); return (ENXIO); } if (OF_hasprop(phandle, "ibm,interrupt-server-ranges")) { OF_getencprop(phandle, "ibm,interrupt-server-ranges", sc->cpu_range, sizeof(sc->cpu_range)); sc->cpu_range[1] += sc->cpu_range[0]; device_printf(dev, "Handling CPUs %d-%d\n", sc->cpu_range[0], sc->cpu_range[1]-1); } 
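	/*
	 * "ibm,interrupt-server-ranges" encodes (first server, count), so
	 * after the read above cpu_range[] holds the half-open CPU range
	 * [first, first + count) served by this presentation controller.
	 * The fallback below assumes servers 0 .. mp_ncpus - 1.
	 */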
	else {
		sc->cpu_range[0] = 0;
		sc->cpu_range[1] = mp_ncpus;
	}

	if (mfmsr() & PSL_HV) {
		for (i = 0; i < sc->cpu_range[1] - sc->cpu_range[0]; i++) {
			sc->mem[i] = bus_alloc_resource_any(dev,
			    SYS_RES_MEMORY, &i, RF_ACTIVE);
			if (sc->mem[i] == NULL) {
				device_printf(dev, "Could not alloc mem "
				    "resource %d\n", i);
				return (ENXIO);
			}

			/* Unmask interrupts on all cores */
			bus_write_1(sc->mem[i], 4, 0xff);
			bus_write_1(sc->mem[i], 12, 0xff);
		}
	}

	mtx_init(&sc->sc_mtx, "XICP", NULL, MTX_DEF);
	sc->nintvecs = 0;

	powerpc_register_pic(dev, OF_xref_from_node(phandle), MAX_XICP_IRQS,
	    1 /* Number of IPIs */, FALSE);
	root_pic = dev;

	return (0);
}

static int
xics_attach(device_t dev)
{
	phandle_t phandle = ofw_bus_get_node(dev);

	/* The XICP (root PIC) will handle all our interrupts */
	powerpc_register_pic(root_pic, OF_xref_from_node(phandle),
	    MAX_XICP_IRQS, 1 /* Number of IPIs */, FALSE);

	return (0);
}

/*
 * PIC I/F methods.
 */

static void
xicp_bind(device_t dev, u_int irq, cpuset_t cpumask)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status, cpu;
	int ncpus, i, error;

+	/* Ignore IPIs */
+	if (irq == MAX_XICP_IRQS)
+		return;
+
	/*
	 * This doesn't appear to actually support affinity groups, so pick a
	 * random CPU.
	 */
	ncpus = 0;
	CPU_FOREACH(cpu)
		if (CPU_ISSET(cpu, &cpumask))
			ncpus++;

	i = mftb() % ncpus;
	ncpus = 0;
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &cpumask))
			continue;
		if (ncpus == i)
			break;
		ncpus++;
	}

	cpu = pcpu_find(cpu)->pc_hwref;

	/* XXX: super inefficient */
	for (i = 0; i < sc->nintvecs; i++) {
		if (sc->intvecs[i].irq == irq) {
			sc->intvecs[i].cpu = cpu;
			break;
		}
	}
	KASSERT(i < sc->nintvecs, ("Binding non-configured interrupt"));

	if (rtas_exists())
		error = rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
		    XICP_PRIORITY, &status);
	else
		error = opal_call(OPAL_SET_XIVE, irq, cpu << 2,
		    XICP_PRIORITY);

	if (error < 0)
		panic("Cannot bind interrupt %d to CPU %d: %d", irq, cpu,
		    error);
}

static void
xicp_dispatch(device_t dev, struct trapframe *tf)
{
	struct xicp_softc *sc;
	struct resource *regs = NULL;
	uint64_t xirr, junk;
	int i;

	if (mfmsr() & PSL_HV) {
		regs = xicp_mem_for_cpu(PCPU_GET(hwref));
		KASSERT(regs != NULL, ("Can't find regs for CPU %ld",
		    PCPU_GET(hwref)));
	}

	sc = device_get_softc(dev);
	for (;;) {
		if (regs) {
			xirr = bus_read_4(regs, 4);
		} else {
			/* Return value in R4, use the PFT call */
			phyp_pft_hcall(H_XIRR, 0, 0, 0, 0, &xirr, &junk,
			    &junk);
		}
		xirr &= 0x00ffffff;

		if (xirr == 0) { /* No more pending interrupts? */
			if (regs)
				bus_write_1(regs, 4, 0xff);
			else
				phyp_hcall(H_CPPR, (uint64_t)0xff);
			break;
		}
		if (xirr == XICP_IPI) {		/* Magic number for IPIs */
			xirr = MAX_XICP_IRQS;	/* Map to FreeBSD magic */

			/* Clear IPI */
			if (regs)
				bus_write_1(regs, 12, 0xff);
			else
				phyp_hcall(H_IPI,
				    (uint64_t)(PCPU_GET(hwref)), 0xff);
		}

		/* XXX: super inefficient */
		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == xirr)
				break;
		}
-//printf("Interrupt %ld\n", xirr);

		KASSERT(i < sc->nintvecs, ("Unmapped XIRR"));
		powerpc_dispatch_intr(sc->intvecs[i].vector, tf);
	}
}

static void
xicp_enable(device_t dev, u_int irq, u_int vector)
{
	struct xicp_softc *sc;
	cell_t status, cpu;

	sc = device_get_softc(dev);

	KASSERT(sc->nintvecs + 1 < nitems(sc->intvecs),
	    ("Too many XICP interrupts"));

	/* Bind to this CPU to start: distrib.
	 * ID is last entry in gserver# */
	cpu = PCPU_GET(hwref);

	mtx_lock(&sc->sc_mtx);
	sc->intvecs[sc->nintvecs].irq = irq;
	sc->intvecs[sc->nintvecs].vector = vector;
	sc->intvecs[sc->nintvecs].cpu = cpu;
	mb();
	sc->nintvecs++;
	mtx_unlock(&sc->sc_mtx);

	/* IPIs are also enabled */
	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
		    XICP_PRIORITY, &status);
		xicp_unmask(dev, irq);
	} else {
		status = opal_call(OPAL_SET_XIVE, irq, cpu << 2,
		    XICP_PRIORITY);
		/* Unmask implicit for OPAL */
		if (status != 0)
			panic("OPAL_SET_XIVE IRQ %d -> cpu %d failed: %d",
			    irq, cpu, status);
	}
}

static void
xicp_eoi(device_t dev, u_int irq)
{
	uint64_t xirr;

	if (irq == MAX_XICP_IRQS) /* Remap IPI interrupt to internal value */
		irq = XICP_IPI;
	xirr = irq | (XICP_PRIORITY << 24);

	if (mfmsr() & PSL_HV)
		bus_write_4(xicp_mem_for_cpu(PCPU_GET(hwref)), 4, xirr);
	else
		phyp_hcall(H_EOI, xirr);
}

static void
xicp_ipi(device_t dev, u_int cpu)
{

	cpu = pcpu_find(cpu)->pc_hwref;

	if (mfmsr() & PSL_HV)
		bus_write_1(xicp_mem_for_cpu(cpu), 12, XICP_PRIORITY);
	else
		phyp_hcall(H_IPI, (uint64_t)cpu, XICP_PRIORITY);
}

static void
xicp_mask(device_t dev, u_int irq)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status;
	int i;

	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_int_off, 1, 1, irq, &status);
	} else {
		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == irq) {
				break;
			}
		}
		KASSERT(i < sc->nintvecs, ("Masking unconfigured interrupt"));
		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2, 0xff);
	}
}

static void
xicp_unmask(device_t dev, u_int irq)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status;
	int i;

	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_int_on, 1, 1, irq, &status);
	} else {
		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == irq) {
				break;
			}
		}
		KASSERT(i < sc->nintvecs,
		    ("Unmasking unconfigured interrupt"));
		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2,
		    XICP_PRIORITY);
	}
}
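
Notes on the changes above (annotations, not part of the diff):

xics.c: the new early return in xicp_bind() matches the existing guards in
xicp_enable(), xicp_mask() and xicp_unmask().  MAX_XICP_IRQS is the
pseudo-IRQ under which the per-CPU IPI is registered (xicp_dispatch() maps
the XICP_IPI magic number to it), and no global interrupt source sits
behind it for ibm,set-xive or OPAL_SET_XIVE to re-route, so bind requests
for it are dropped rather than handed to firmware.

mp_machdep.c: powerpc_ipi_handler() drains the per-CPU pc_ipimask word
with ffs(), lowest message number first.  A minimal user-space sketch of
that drain loop (illustration only: "mask" stands in for the snapshot of
pc_ipimask, handle() for the message switch):

	#include <stdio.h>
	#include <strings.h>

	static void
	handle(int msg)
	{
		printf("IPI message %d\n", msg);
	}

	int
	main(void)
	{
		unsigned int mask = (1u << 2) | (1u << 0); /* two pending */
		int msg;

		while ((msg = ffs(mask) - 1) != -1) {
			mask &= ~(1u << msg);	/* clear lowest set bit */
			handle(msg);
		}
		return (0);
	}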