Index: head/sys/alpha/alpha/mp_machdep.c =================================================================== --- head/sys/alpha/alpha/mp_machdep.c (revision 152021) +++ head/sys/alpha/alpha/mp_machdep.c (revision 152022) @@ -1,597 +1,599 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Set to 1 once we're ready to let the APs out of the pen. 
*/ static volatile int aps_ready = 0; static struct mtx ap_boot_mtx; u_int64_t boot_cpu_id; +struct pcb stoppcbs[MAXCPU]; static void release_aps(void *dummy); static int smp_cpu_enabled(struct pcs *pcsp); extern void smp_init_secondary_glue(void); static int smp_send_secondary_command(const char *command, int pal_id); static int smp_start_secondary(int pal_id, int cpuid); /* * Communicate with a console running on a secondary processor. * Return 1 on failure. */ static int smp_send_secondary_command(const char *command, int pal_id) { u_int64_t mask = 1L << pal_id; struct pcs *cpu = LOCATE_PCS(hwrpb, pal_id); int i, len; /* * Sanity check. */ len = strlen(command); if (len > sizeof(cpu->pcs_buffer.rxbuf)) { printf("smp_send_secondary_command: command '%s' too long\n", command); return 0; } /* * Wait for the rx bit to clear. */ for (i = 0; i < 100000; i++) { if (!(hwrpb->rpb_rxrdy & mask)) break; DELAY(10); } if (hwrpb->rpb_rxrdy & mask) return 0; /* * Write the command into the processor's buffer. */ bcopy(command, cpu->pcs_buffer.rxbuf, len); cpu->pcs_buffer.rxlen = len; /* * Set the bit in the rxrdy mask and let the secondary try to * handle the command. */ atomic_set_64(&hwrpb->rpb_rxrdy, mask); /* * Wait for the rx bit to clear. */ for (i = 0; i < 100000; i++) { if (!(hwrpb->rpb_rxrdy & mask)) break; DELAY(10); } if (hwrpb->rpb_rxrdy & mask) return 0; return 1; } void smp_init_secondary(void) { struct pcs *cpu; /* spin until all the AP's are ready */ while (!aps_ready) /*spin*/ ; /* * Record the pcpu pointer in the per-cpu system value. */ alpha_pal_wrval((u_int64_t) pcpup); /* Clear userland thread pointer. */ alpha_pal_wrunique(0); /* Initialize curthread. */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); PCPU_SET(curthread, PCPU_GET(idlethread)); /* * Point interrupt/exception vectors to our own. 
*/ alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT); alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH); alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM); alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF); alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA); alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS); /* lower the ipl and take any pending machine check */ mc_expected = 1; alpha_mb(); alpha_mb(); alpha_pal_wrmces(7); (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); mc_expected = 0; /* * Set flags in our per-CPU slot in the HWRPB. */ cpu = LOCATE_PCS(hwrpb, PCPU_GET(pal_id)); cpu->pcs_flags &= ~PCS_BIP; cpu->pcs_flags |= PCS_RC; alpha_mb(); /* * XXX: doesn't idleproc already have a pcb from when it was * kthread_create'd? * * cache idleproc's physical address. */ curthread->td_md.md_pcbpaddr = (struct pcb *)PCPU_GET(idlepcbphys); /* * and make idleproc's trapframe pointer point to its * stack pointer for sanity. */ curthread->td_frame = (struct trapframe *)PCPU_PTR(idlepcb)->apcb_ksp; mtx_lock_spin(&ap_boot_mtx); smp_cpus++; CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid)); /* Build our map of 'other' CPUs. */ PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask)); printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid)); if (smp_cpus == mp_ncpus) { smp_started = 1; smp_active = 1; } mtx_unlock_spin(&ap_boot_mtx); while (smp_started == 0) ; /* nothing */ /* ok, now grab sched_lock and enter the scheduler */ mtx_lock_spin(&sched_lock); /* * Correct spinlock nesting. The idle thread context that we are * borrowing was created so that it would start out with a single * spin lock (sched_lock) held in fork_trampoline(). Since we've * explicitly acquired locks in this function, the nesting count * is now 2 rather than 1. Since we are nested, calling * spinlock_exit() will simply adjust the counts without allowing * spin lock using code to interrupt us. 
*/ spinlock_exit(); KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); binuptime(PCPU_PTR(switchtime)); PCPU_SET(switchticks, ticks); cpu_throw(NULL, choosethread()); /* doesn't return */ panic("scheduler returned us to %s", __func__); } static int smp_start_secondary(int pal_id, int cpuid) { struct pcs *cpu = LOCATE_PCS(hwrpb, pal_id); struct pcs *bootcpu = LOCATE_PCS(hwrpb, boot_cpu_id); struct alpha_pcb *pcb = (struct alpha_pcb *) cpu->pcs_hwpcb; struct pcpu *pcpu; int i; size_t sz; if ((cpu->pcs_flags & PCS_PV) == 0) { printf("smp_start_secondary: cpu %d PALcode invalid\n", pal_id); return 0; } if (bootverbose) printf("smp_start_secondary: starting cpu %d\n", pal_id); sz = KSTACK_PAGES * PAGE_SIZE; pcpu = malloc(sz, M_TEMP, M_NOWAIT); if (!pcpu) { printf("smp_start_secondary: can't allocate memory\n"); return 0; } pcpu_init(pcpu, cpuid, sz); pcpu->pc_pal_id = pal_id; /* * Copy the idle pcb and setup the address to start executing. * Use the pcb unique value to point the secondary at its pcpu * structure. */ *pcb = pcpu->pc_idlepcb; pcb->apcb_unique = (u_int64_t)pcpu; hwrpb->rpb_restart = (u_int64_t) smp_init_secondary_glue; hwrpb->rpb_restart_val = (u_int64_t) smp_init_secondary_glue; hwrpb->rpb_checksum = hwrpb_checksum(); /* * Tell the cpu to start with the same PALcode as us. */ bcopy(&bootcpu->pcs_pal_rev, &cpu->pcs_pal_rev, sizeof cpu->pcs_pal_rev); /* * Set flags in cpu structure and push out write buffers to * make sure the secondary sees it. */ cpu->pcs_flags |= PCS_CV|PCS_RC; cpu->pcs_flags &= ~PCS_BIP; alpha_mb(); /* * Fire it up and hope for the best. */ if (!smp_send_secondary_command("START\r\n", pal_id)) { printf("smp_start_secondary: can't send START command\n"); pcpu_destroy(pcpu); free(pcpu, M_TEMP); return 0; } /* * Wait for the secondary to set the BIP flag in its structure. 
*/ for (i = 0; i < 100000; i++) { if (cpu->pcs_flags & PCS_BIP) break; DELAY(10); } if (!(cpu->pcs_flags & PCS_BIP)) { printf("smp_start_secondary: secondary did not respond\n"); pcpu_destroy(pcpu); free(pcpu, M_TEMP); return 0; } /* * It worked (I think). */ if (bootverbose) printf("smp_start_secondary: cpu %d started\n", pal_id); return 1; } /* Other stuff */ static int smp_cpu_enabled(struct pcs *pcsp) { /* Is this CPU present? */ if ((pcsp->pcs_flags & PCS_PP) == 0) return (0); /* Is this CPU available? */ if ((pcsp->pcs_flags & PCS_PA) == 0) /* * The TurboLaser PCS_PA bit doesn't seem to be set * correctly. */ if (hwrpb->rpb_type != ST_DEC_21000) return (0); /* Is this CPU's PALcode valid? */ if ((pcsp->pcs_flags & PCS_PV) == 0) return (0); return (1); } void cpu_mp_setmaxid(void) { u_int64_t i; mp_maxid = 0; for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { if (i == PCPU_GET(pal_id)) continue; if (!smp_cpu_enabled(LOCATE_PCS(hwrpb, i))) continue; mp_maxid++; } if (mp_maxid > MAXCPU) mp_maxid = MAXCPU; } int cpu_mp_probe(void) { int i, cpus; /* XXX: Need to check for valid platforms here. */ boot_cpu_id = PCPU_GET(pal_id); KASSERT(boot_cpu_id == hwrpb->rpb_primary_cpu_id, ("cpu_mp_probe() called on non-primary CPU")); all_cpus = PCPU_GET(cpumask); mp_ncpus = 1; /* Make sure we have at least one secondary CPU. 
*/ cpus = 0; for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { if (i == PCPU_GET(pal_id)) continue; if (!smp_cpu_enabled(LOCATE_PCS(hwrpb, i))) continue; cpus++; } return (cpus); } void cpu_mp_start(void) { int i, cpuid; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); cpuid = 1; for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { struct pcs *pcsp; if (i == boot_cpu_id) continue; pcsp = LOCATE_PCS(hwrpb, i); if ((pcsp->pcs_flags & PCS_PP) == 0) continue; if ((pcsp->pcs_flags & PCS_PA) == 0) { if (hwrpb->rpb_type == ST_DEC_21000) { printf("Ignoring PA bit for CPU %d.\n", i); } else { if (bootverbose) printf("CPU %d not available.\n", i); continue; } } if ((pcsp->pcs_flags & PCS_PV) == 0) { if (bootverbose) printf("CPU %d does not have valid PALcode.\n", i); continue; } if (i > MAXCPU) { if (bootverbose) { printf("CPU %d not supported.", i); printf(" Only %d CPUs supported.\n", MAXCPU); } continue; } if (resource_disabled("cpu", i)) { printf("CPU %d disabled by loader.\n", i); continue; } if (smp_start_secondary(i, cpuid)) { all_cpus |= (1 << cpuid); mp_ncpus++; cpuid++; } } PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask)); } void cpu_mp_announce(void) { struct pcpu *pc; int i; /* List CPUs */ printf(" cpu0 (BSP): PAL ID: %2lu\n", boot_cpu_id); for (i = 1; i < MAXCPU; i++) { if (CPU_ABSENT(i)) continue; pc = pcpu_find(i); MPASS(pc != NULL); printf(" cpu%d (AP): PAL ID: %2lu\n", i, pc->pc_pal_id); } } /* * send an IPI to a set of cpus. 
*/ void ipi_selected(u_int32_t cpus, u_int64_t ipi) { struct pcpu *pcpu; CTR2(KTR_SMP, "ipi_selected: cpus: %x ipi: %lx", cpus, ipi); alpha_mb(); while (cpus) { int cpuid = ffs(cpus) - 1; cpus &= ~(1 << cpuid); pcpu = pcpu_find(cpuid); if (pcpu) { atomic_set_64(&pcpu->pc_pending_ipis, ipi); alpha_mb(); CTR1(KTR_SMP, "calling alpha_pal_wripir(%d)", pcpu->pc_pal_id); alpha_pal_wripir(pcpu->pc_pal_id); } } } /* * send an IPI INTerrupt containing 'vector' to all CPUs, including myself */ void ipi_all(u_int64_t ipi) { ipi_selected(all_cpus, ipi); } /* * send an IPI to all CPUs EXCEPT myself */ void ipi_all_but_self(u_int64_t ipi) { ipi_selected(PCPU_GET(other_cpus), ipi); } /* * send an IPI to myself */ void ipi_self(u_int64_t ipi) { ipi_selected(PCPU_GET(cpumask), ipi); } /* * Handle an IPI sent to this processor. */ void smp_handle_ipi(struct trapframe *frame) { u_int64_t ipis = atomic_readandclear_64(PCPU_PTR(pending_ipis)); u_int64_t ipi; int cpumask; cpumask = PCPU_GET(cpumask); CTR1(KTR_SMP, "smp_handle_ipi(), ipis=%lx", ipis); while (ipis) { /* * Find the lowest set bit. */ ipi = ipis & ~(ipis - 1); ipis &= ~ipi; switch (ipi) { case IPI_INVLTLB: CTR0(KTR_SMP, "IPI_NVLTLB"); ALPHA_TBIA(); break; case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: CTR0(KTR_SMP, "IPI_STOP"); - atomic_set_int(&stopped_cpus, cpumask); + savectx(&stoppcbs[PCPU_GET(cpuid)]); + atomic_set_acq_int(&stopped_cpus, cpumask); while ((started_cpus & cpumask) == 0) - alpha_mb(); - atomic_clear_int(&started_cpus, cpumask); - atomic_clear_int(&stopped_cpus, cpumask); + cpu_spinwait(); + atomic_clear_rel_int(&started_cpus, cpumask); + atomic_clear_rel_int(&stopped_cpus, cpumask); break; } } /* * Dump console messages to the console. XXX - we need to handle * requests to provide PALcode to secondaries and to start up new * secondaries that are added to the system on the fly. 
*/ if (PCPU_GET(pal_id) == boot_cpu_id) { u_int pal_id; u_int64_t txrdy; #ifdef DIAGNOSTIC struct pcs *cpu; char buf[81]; #endif alpha_mb(); while (hwrpb->rpb_txrdy != 0) { pal_id = ffs(hwrpb->rpb_txrdy) - 1; #ifdef DIAGNOSTIC cpu = LOCATE_PCS(hwrpb, pal_id); bcopy(&cpu->pcs_buffer.txbuf, buf, cpu->pcs_buffer.txlen); buf[cpu->pcs_buffer.txlen] = '\0'; printf("SMP From CPU%d: %s\n", pal_id, buf); #endif do { txrdy = hwrpb->rpb_txrdy; } while (atomic_cmpset_64(&hwrpb->rpb_txrdy, txrdy, txrdy & ~(1 << pal_id)) == 0); } } } static void release_aps(void *dummy __unused) { if (bootverbose && mp_ncpus > 1) printf("%s: releasing secondary CPUs\n", __func__); atomic_store_rel_int(&aps_ready, 1); while (mp_ncpus > 1 && smp_started == 0) ; /* nothing */ } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); Index: head/sys/alpha/include/smp.h =================================================================== --- head/sys/alpha/include/smp.h (revision 152021) +++ head/sys/alpha/include/smp.h (revision 152022) @@ -1,40 +1,41 @@ /*- * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ * */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #ifdef _KERNEL /* * Interprocessor interrupts for SMP. 
*/ #define IPI_INVLTLB 0x0001 #define IPI_RENDEZVOUS 0x0002 #define IPI_AST 0x0004 #define IPI_CHECKSTATE 0x0008 #define IPI_STOP 0x0010 #ifndef LOCORE extern u_int64_t boot_cpu_id; +extern struct pcb stoppcbs[]; void ipi_selected(u_int cpus, u_int64_t ipi); void ipi_all(u_int64_t ipi); void ipi_all_but_self(u_int64_t ipi); void ipi_self(u_int64_t ipi); void smp_handle_ipi(struct trapframe *frame); void smp_init_secondary(void); #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* _MACHINE_SMP_H_ */ Index: head/sys/kern/subr_kdb.c =================================================================== --- head/sys/kern/subr_kdb.c (revision 152021) +++ head/sys/kern/subr_kdb.c (revision 152022) @@ -1,497 +1,497 @@ /*- * Copyright (c) 2004 The FreeBSD Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kdb.h" #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP -#if defined (__i386__) || defined(__amd64__) +#if defined (__i386__) || defined(__amd64__) || defined(__sparc64__) || defined(__alpha__) #define HAVE_STOPPEDPCBS #include #endif #endif int kdb_active = 0; void *kdb_jmpbufp = NULL; struct kdb_dbbe *kdb_dbbe = NULL; struct pcb kdb_pcb; struct pcb *kdb_thrctx = NULL; struct thread *kdb_thread = NULL; struct trapframe *kdb_frame = NULL; KDB_BACKEND(null, NULL, NULL, NULL); SET_DECLARE(kdb_dbbe_set, struct kdb_dbbe); static int kdb_sysctl_available(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_current(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_enter(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_panic(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_trap(SYSCTL_HANDLER_ARGS); SYSCTL_NODE(_debug, OID_AUTO, kdb, CTLFLAG_RW, NULL, "KDB nodes"); SYSCTL_PROC(_debug_kdb, OID_AUTO, available, CTLTYPE_STRING | CTLFLAG_RD, 0, 0, kdb_sysctl_available, "A", "list of available KDB backends"); SYSCTL_PROC(_debug_kdb, OID_AUTO, current, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, kdb_sysctl_current, "A", "currently selected KDB backend"); SYSCTL_PROC(_debug_kdb, OID_AUTO, enter, CTLTYPE_INT | CTLFLAG_RW, 0, 0, kdb_sysctl_enter, "I", "set to enter the debugger"); SYSCTL_PROC(_debug_kdb, OID_AUTO, panic, CTLTYPE_INT | CTLFLAG_RW, 0, 0, kdb_sysctl_panic, "I", "set to panic the kernel"); 
SYSCTL_PROC(_debug_kdb, OID_AUTO, trap, CTLTYPE_INT | CTLFLAG_RW, 0, 0, kdb_sysctl_trap, "I", "set cause a page fault"); /* * Flag indicating whether or not to IPI the other CPUs to stop them on * entering the debugger. Sometimes, this will result in a deadlock as * stop_cpus() waits for the other cpus to stop, so we allow it to be * disabled. */ #ifdef SMP static int kdb_stop_cpus = 1; SYSCTL_INT(_debug_kdb, OID_AUTO, stop_cpus, CTLTYPE_INT | CTLFLAG_RW, &kdb_stop_cpus, 0, ""); TUNABLE_INT("debug.kdb.stop_cpus", &kdb_stop_cpus); #endif static int kdb_sysctl_available(SYSCTL_HANDLER_ARGS) { struct kdb_dbbe *be, **iter; char *avail, *p; ssize_t len, sz; int error; sz = 0; SET_FOREACH(iter, kdb_dbbe_set) { be = *iter; if (be->dbbe_active == 0) sz += strlen(be->dbbe_name) + 1; } sz++; avail = malloc(sz, M_TEMP, M_WAITOK); p = avail; *p = '\0'; SET_FOREACH(iter, kdb_dbbe_set) { be = *iter; if (be->dbbe_active == 0) { len = snprintf(p, sz, "%s ", be->dbbe_name); p += len; sz -= len; } } KASSERT(sz >= 0, ("%s", __func__)); error = sysctl_handle_string(oidp, avail, 0, req); free(avail, M_TEMP); return (error); } static int kdb_sysctl_current(SYSCTL_HANDLER_ARGS) { char buf[16]; int error; if (kdb_dbbe != NULL) { strncpy(buf, kdb_dbbe->dbbe_name, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; } else *buf = '\0'; error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (kdb_active) return (EBUSY); return (kdb_dbbe_select(buf)); } static int kdb_sysctl_enter(SYSCTL_HANDLER_ARGS) { int error, i; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) { i = 0; error = sysctl_handle_int(oidp, &i, 0, req); } if (error != 0 || req->newptr == NULL) return (error); if (kdb_active) return (EBUSY); kdb_enter("sysctl debug.kdb.enter"); return (0); } static int kdb_sysctl_panic(SYSCTL_HANDLER_ARGS) { int error, i; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) { i = 0; error = sysctl_handle_int(oidp, 
&i, 0, req); } if (error != 0 || req->newptr == NULL) return (error); panic("kdb_sysctl_panic"); return (0); } static int kdb_sysctl_trap(SYSCTL_HANDLER_ARGS) { int error, i; int *addr = (int *)0x10; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) { i = 0; error = sysctl_handle_int(oidp, &i, 0, req); } if (error != 0 || req->newptr == NULL) return (error); return (*addr); } /* * Solaris implements a new BREAK which is initiated by a character sequence * CR ~ ^b which is similar to a familiar pattern used on Sun servers by the * Remote Console. * * Note that this function may be called from almost anywhere, with interrupts * disabled and with unknown locks held, so it must not access data other than * its arguments. Its up to the caller to ensure that the state variable is * consistent. */ #define KEY_CR 13 /* CR '\r' */ #define KEY_TILDE 126 /* ~ */ #define KEY_CRTLB 2 /* ^B */ int kdb_alt_break(int key, int *state) { int brk; brk = 0; switch (key) { case KEY_CR: *state = KEY_TILDE; break; case KEY_TILDE: *state = (*state == KEY_TILDE) ? KEY_CRTLB : 0; break; case KEY_CRTLB: if (*state == KEY_CRTLB) brk = 1; /* FALLTHROUGH */ default: *state = 0; break; } return (brk); } /* * Print a backtrace of the calling thread. The backtrace is generated by * the selected debugger, provided it supports backtraces. If no debugger * is selected or the current debugger does not support backtraces, this * function silently returns. */ void kdb_backtrace() { if (kdb_dbbe != NULL && kdb_dbbe->dbbe_trace != NULL) { printf("KDB: stack backtrace:\n"); kdb_dbbe->dbbe_trace(); } } /* * Set/change the current backend. */ int kdb_dbbe_select(const char *name) { struct kdb_dbbe *be, **iter; SET_FOREACH(iter, kdb_dbbe_set) { be = *iter; if (be->dbbe_active == 0 && strcmp(be->dbbe_name, name) == 0) { kdb_dbbe = be; return (0); } } return (EINVAL); } /* * Enter the currently selected debugger. If a message has been provided, * it is printed first. 
If the debugger does not support the enter method, * it is entered by using breakpoint(), which enters the debugger through * kdb_trap(). */ void kdb_enter(const char *msg) { if (kdb_dbbe != NULL && kdb_active == 0) { if (msg != NULL) printf("KDB: enter: %s\n", msg); breakpoint(); } } /* * Initialize the kernel debugger interface. */ void kdb_init() { struct kdb_dbbe *be, **iter; int cur_pri, pri; kdb_active = 0; kdb_dbbe = NULL; cur_pri = -1; SET_FOREACH(iter, kdb_dbbe_set) { be = *iter; pri = (be->dbbe_init != NULL) ? be->dbbe_init() : -1; be->dbbe_active = (pri >= 0) ? 0 : -1; if (pri > cur_pri) { cur_pri = pri; kdb_dbbe = be; } } if (kdb_dbbe != NULL) { printf("KDB: debugger backends:"); SET_FOREACH(iter, kdb_dbbe_set) { be = *iter; if (be->dbbe_active == 0) printf(" %s", be->dbbe_name); } printf("\n"); printf("KDB: current backend: %s\n", kdb_dbbe->dbbe_name); } } /* * Handle contexts. */ void * kdb_jmpbuf(jmp_buf new) { void *old; old = kdb_jmpbufp; kdb_jmpbufp = new; return (old); } void kdb_reenter(void) { if (!kdb_active || kdb_jmpbufp == NULL) return; longjmp(kdb_jmpbufp, 1); /* NOTREACHED */ } /* * Thread related support functions. 
*/ struct pcb * kdb_thr_ctx(struct thread *thr) { #ifdef HAVE_STOPPEDPCBS struct pcpu *pc; u_int cpuid; #endif if (thr == curthread) return (&kdb_pcb); #ifdef HAVE_STOPPEDPCBS SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { cpuid = pc->pc_cpuid; if (pc->pc_curthread == thr && (stopped_cpus & (1 << cpuid))) return (&stoppcbs[cpuid]); } #endif return (thr->td_pcb); } struct thread * kdb_thr_first(void) { struct proc *p; struct thread *thr; p = LIST_FIRST(&allproc); while (p != NULL) { if (p->p_sflag & PS_INMEM) { thr = FIRST_THREAD_IN_PROC(p); if (thr != NULL) return (thr); } p = LIST_NEXT(p, p_list); } return (NULL); } struct thread * kdb_thr_from_pid(pid_t pid) { struct proc *p; p = LIST_FIRST(&allproc); while (p != NULL) { if (p->p_sflag & PS_INMEM && p->p_pid == pid) return (FIRST_THREAD_IN_PROC(p)); p = LIST_NEXT(p, p_list); } return (NULL); } struct thread * kdb_thr_lookup(lwpid_t tid) { struct thread *thr; thr = kdb_thr_first(); while (thr != NULL && thr->td_tid != tid) thr = kdb_thr_next(thr); return (thr); } struct thread * kdb_thr_next(struct thread *thr) { struct proc *p; p = thr->td_proc; thr = TAILQ_NEXT(thr, td_plist); do { if (thr != NULL) return (thr); p = LIST_NEXT(p, p_list); if (p != NULL && (p->p_sflag & PS_INMEM)) thr = FIRST_THREAD_IN_PROC(p); } while (p != NULL); return (NULL); } int kdb_thr_select(struct thread *thr) { if (thr == NULL) return (EINVAL); kdb_thread = thr; kdb_thrctx = kdb_thr_ctx(thr); return (0); } /* * Enter the debugger due to a trap. */ int kdb_trap(int type, int code, struct trapframe *tf) { #ifdef SMP int did_stop_cpus; #endif int handled; if (kdb_dbbe == NULL || kdb_dbbe->dbbe_trap == NULL) return (0); /* We reenter the debugger through kdb_reenter(). */ if (kdb_active) return (0); critical_enter(); kdb_active++; #ifdef SMP if ((did_stop_cpus = kdb_stop_cpus) != 0) stop_cpus(PCPU_GET(other_cpus)); #endif kdb_frame = tf; /* Let MD code do its thing first... 
*/ kdb_cpu_trap(type, code); makectx(tf, &kdb_pcb); kdb_thr_select(curthread); handled = kdb_dbbe->dbbe_trap(type, code); #ifdef SMP if (did_stop_cpus) restart_cpus(stopped_cpus); #endif kdb_active--; critical_exit(); return (handled); } Index: head/sys/sparc64/include/smp.h =================================================================== --- head/sys/sparc64/include/smp.h (revision 152021) +++ head/sys/sparc64/include/smp.h (revision 152022) @@ -1,256 +1,258 @@ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #define CPU_CLKSYNC 1 #define CPU_INIT 2 #define CPU_BOOTSTRAP 3 #ifndef LOCORE #include #include #define IDR_BUSY (1<<0) #define IDR_NACK (1<<1) #define IPI_AST PIL_AST #define IPI_RENDEZVOUS PIL_RENDEZVOUS #define IPI_STOP PIL_STOP #define IPI_RETRIES 5000 struct cpu_start_args { u_int csa_count; u_int csa_mid; u_int csa_state; vm_offset_t csa_pcpu; u_long csa_tick; u_long csa_ver; struct tte csa_ttes[PCPU_PAGES]; }; struct ipi_cache_args { u_int ica_mask; vm_paddr_t ica_pa; }; struct ipi_tlb_args { u_int ita_mask; struct pmap *ita_pmap; u_long ita_start; u_long ita_end; }; #define ita_va ita_start struct pcpu; +extern struct pcb stoppcbs[]; + void cpu_mp_bootstrap(struct pcpu *pc); void cpu_mp_shutdown(void); void cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2); void cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2); void ipi_selected(u_int cpus, u_int ipi); void ipi_all(u_int ipi); void ipi_all_but_self(u_int ipi); vm_offset_t mp_tramp_alloc(void); extern struct mtx ipi_mtx; extern struct ipi_cache_args ipi_cache_args; extern struct ipi_tlb_args ipi_tlb_args; extern vm_offset_t mp_tramp; extern char *mp_tramp_code; extern u_long mp_tramp_code_len; extern u_long mp_tramp_tlb_slots; extern u_long mp_tramp_func; extern void mp_startup(void); extern char tl_ipi_cheetah_dcache_page_inval[]; extern char tl_ipi_spitfire_dcache_page_inval[]; extern char tl_ipi_spitfire_icache_page_inval[]; extern char tl_ipi_level[]; extern char tl_ipi_tlb_context_demap[]; extern char tl_ipi_tlb_page_demap[]; extern char tl_ipi_tlb_range_demap[]; #ifdef SMP #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_) static __inline void * ipi_dcache_page_inval(void *func, vm_paddr_t pa) { struct ipi_cache_args *ica; if (smp_cpus == 1) return (NULL); ica = &ipi_cache_args; mtx_lock_spin(&ipi_mtx); ica->ica_mask = all_cpus; ica->ica_pa = pa; cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, 
(u_long)ica); return (&ica->ica_mask); } static __inline void * ipi_icache_page_inval(void *func, vm_paddr_t pa) { struct ipi_cache_args *ica; if (smp_cpus == 1) return (NULL); ica = &ipi_cache_args; mtx_lock_spin(&ipi_mtx); ica->ica_mask = all_cpus; ica->ica_pa = pa; cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica); return (&ica->ica_mask); } static __inline void * ipi_tlb_context_demap(struct pmap *pm) { struct ipi_tlb_args *ita; u_int cpus; if (smp_cpus == 1) return (NULL); if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) return (NULL); ita = &ipi_tlb_args; mtx_lock_spin(&ipi_mtx); ita->ita_mask = cpus | PCPU_GET(cpumask); ita->ita_pmap = pm; cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap, (u_long)ita); return (&ita->ita_mask); } static __inline void * ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va) { struct ipi_tlb_args *ita; u_int cpus; if (smp_cpus == 1) return (NULL); if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) return (NULL); ita = &ipi_tlb_args; mtx_lock_spin(&ipi_mtx); ita->ita_mask = cpus | PCPU_GET(cpumask); ita->ita_pmap = pm; ita->ita_va = va; cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita); return (&ita->ita_mask); } static __inline void * ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end) { struct ipi_tlb_args *ita; u_int cpus; if (smp_cpus == 1) return (NULL); if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) return (NULL); ita = &ipi_tlb_args; mtx_lock_spin(&ipi_mtx); ita->ita_mask = cpus | PCPU_GET(cpumask); ita->ita_pmap = pm; ita->ita_start = start; ita->ita_end = end; cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita); return (&ita->ita_mask); } static __inline void ipi_wait(void *cookie) { volatile u_int *mask; if ((mask = cookie) != NULL) { atomic_clear_int(mask, PCPU_GET(cpumask)); while (*mask != 0) ; mtx_unlock_spin(&ipi_mtx); } } #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */ #else static __inline void * 
ipi_dcache_page_inval(void *func, vm_paddr_t pa) { return (NULL); } static __inline void * ipi_icache_page_inval(void *func, vm_paddr_t pa) { return (NULL); } static __inline void * ipi_tlb_context_demap(struct pmap *pm) { return (NULL); } static __inline void * ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va) { return (NULL); } static __inline void * ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end) { return (NULL); } static __inline void ipi_wait(void *cookie) { } #endif /* SMP */ #endif /* !LOCORE */ #endif /* !_MACHINE_SMP_H_ */ Index: head/sys/sparc64/sparc64/mp_machdep.c =================================================================== --- head/sys/sparc64/sparc64/mp_machdep.c (revision 152021) +++ head/sys/sparc64/sparc64/mp_machdep.c (revision 152022) @@ -1,471 +1,474 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp */ /*- * Copyright (c) 2002 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#include
__FBSDID("$FreeBSD$");

/*
 * NOTE(review): the header names of the following #include directives were
 * lost when this file was extracted (angle-bracketed paths stripped); the
 * bare "#include" tokens are preserved as found and must be restored from
 * the repository before this file can build.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Local IPI handlers, wired to interrupt levels in cpu_mp_start(). */
static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_stop;

/*
 * Argument area used to pass data to non-boot processors as they start up.
 * This must be statically initialized with a known invalid upa module id,
 * since the other processors will use it before the boot cpu enters the
 * kernel.
 */
struct	cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0 };
struct	ipi_cache_args ipi_cache_args;
struct	ipi_tlb_args ipi_tlb_args;
/* Per-CPU register context saved by cpu_ipi_stop() when a CPU is stopped. */
struct	pcb stoppcbs[MAXCPU];

struct	mtx ipi_mtx;

vm_offset_t mp_tramp;

u_int	mp_boot_mid;

static	volatile u_int	shutdown_cpus;

void cpu_mp_unleash(void *);
SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

/*
 * Allocate a page from the firmware, copy the AP startup trampoline into it,
 * and patch in the kernel TLB slot count, the secondary entry point
 * (mp_startup) and locked 4MB TTEs covering the kernel, so a secondary cpu
 * can map the kernel before jumping into it.  Returns the trampoline's
 * address.
 */
vm_offset_t
mp_tramp_alloc(void)
{
	struct tte *tp;
	char *v;
	int i;

	v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
	if (v == NULL)
		panic("mp_tramp_alloc");
	bcopy(mp_tramp_code, v, mp_tramp_code_len);
	/* Patch the trampoline's parameter slots. */
	*(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
	*(u_long *)(v + mp_tramp_func) = (u_long)mp_startup;
	/* Locked (TD_L), privileged, writable 4MB mappings for the kernel. */
	tp = (struct tte *)(v + mp_tramp_code_len);
	for (i = 0; i < kernel_tlb_slots; i++) {
		tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
		tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;
	}
	/* Make the patched trampoline visible to instruction fetch. */
	for (i = 0; i < PAGE_SIZE; i += sizeof(long))
		flush(v + i);
	return (vm_offset_t)v;
}

/*
 * Probe for other cpus.
*/
void
cpu_mp_setmaxid(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	int cpus;

	all_cpus = 1 << PCPU_GET(cpuid);
	mp_boot_mid = PCPU_GET(mid);
	mp_ncpus = 1;

	/* Count the "cpu" device nodes hanging off the OFW root. */
	cpus = 0;
	root = OF_peer(0);
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (OF_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0)
			cpus++;
	}
	/* Highest usable cpuid; cpu ids are assigned densely from 0. */
	mp_maxid = cpus - 1;
}

/* Return non-zero if more than one cpu was found by cpu_mp_setmaxid(). */
int
cpu_mp_probe(void)
{

	return (mp_maxid > 0);
}

/*
 * Ask the firmware to start the cpu described by OFW node 'cpu' executing
 * func(arg), via the "SUNW,start-cpu" client interface service.
 */
static void
sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	cpu;
		cell_t	func;
		cell_t	arg;
	} args = {
		(cell_t)"SUNW,start-cpu",
		3,
		0,
		0,
		0,
		0
	};

	args.cpu = cpu;
	args.func = (cell_t)func;
	args.arg = (cell_t)arg;
	openfirmware(&args);
}

/*
 * Stop the calling CPU.
 */
static void
sun4u_stopself(void)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
	} args = {
		(cell_t)"SUNW,stop-self",
		0,
		0,
	};

	/* Returns the cpu to the firmware; this call should not come back. */
	openfirmware_exit(&args);
	panic("sun4u_stopself: failed.");
}

/*
 * Fire up any non-boot processors.
*/
void
cpu_mp_start(void)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	phandle_t child;
	phandle_t root;
	vm_offset_t va;
	char buf[128];
	u_int clock;
	int cpuid;
	u_int mid;
	u_long s;

	mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);

	/* Wire the IPI handlers to their interrupt levels. */
	intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
	intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
	    -1, NULL, NULL);
	intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);

	root = OF_peer(0);
	csa = &cpu_start_args;
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (OF_getprop(child, "device_type", buf, sizeof(buf)) <= 0 ||
		    strcmp(buf, "cpu") != 0)
			continue;
		/* Module id lives in "upa-portid" or "portid" by machine. */
		if (OF_getprop(child, "upa-portid", &mid, sizeof(mid)) <= 0 &&
		    OF_getprop(child, "portid", &mid, sizeof(mid)) <= 0)
			panic("cpu_mp_start: can't get module id");
		if (mid == mp_boot_mid)
			continue;
		if (OF_getprop(child, "clock-frequency", &clock,
		    sizeof(clock)) <= 0)
			panic("cpu_mp_start: can't get clock");

		/*
		 * Start the cpu in the trampoline and sync its tick counter
		 * with ours; the AP advances csa_state from CPU_CLKSYNC to
		 * CPU_INIT as it comes up.
		 */
		csa->csa_state = 0;
		sun4u_startcpu(child, (void *)mp_tramp, 0);
		s = intr_disable();
		while (csa->csa_state != CPU_CLKSYNC)
			;
		membar(StoreLoad);
		csa->csa_tick = rd(tick);
		while (csa->csa_state != CPU_INIT)
			;
		csa->csa_tick = 0;
		intr_restore(s);

		cpuid = mp_ncpus++;
		cpu_identify(csa->csa_ver, clock, cpuid);

		/* Per-cpu area sits at the top of its PCPU_PAGES region. */
		va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
		pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
		pcpu_init(pc, cpuid, sizeof(*pc));
		pc->pc_addr = va;
		pc->pc_mid = mid;
		pc->pc_node = child;

		all_cpus |= 1 << cpuid;
	}
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
	smp_active = 1;
}

void
cpu_mp_announce(void)
{
}

/*
 * Release the APs into the kernel proper: partition the user TLB context
 * space among the cpus, hand each AP its pcpu mappings and idle thread, and
 * wait for every AP to reach CPU_BOOTSTRAP.  Run from SYSINIT at
 * SI_SUB_SMP.
 */
void
cpu_mp_unleash(void *v)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	vm_offset_t va;
	vm_paddr_t pa;
	u_int ctx_min;
	u_int ctx_inc;
	u_long s;
	int i;

	ctx_min = TLB_CTX_USER_MIN;
	ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
	csa = &cpu_start_args;
	csa->csa_count = mp_ncpus;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		/* Give each cpu a disjoint slice of TLB contexts. */
		pc->pc_tlb_ctx = ctx_min;
		pc->pc_tlb_ctx_min = ctx_min;
		pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
		ctx_min += ctx_inc;
		if (pc->pc_cpuid == PCPU_GET(cpuid))
			continue;
		KASSERT(pc->pc_idlethread != NULL,
		    ("cpu_mp_unleash: idlethread"));
		pc->pc_curthread = pc->pc_idlethread;
		pc->pc_curpcb = pc->pc_curthread->td_pcb;
		/* Build locked TTEs so the AP can map its pcpu pages. */
		for (i = 0; i < PCPU_PAGES; i++) {
			va = pc->pc_addr + i * PAGE_SIZE;
			pa = pmap_kextract(va);
			if (pa == 0)
				panic("cpu_mp_unleash: pmap_kextract\n");
			csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K);
			csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
			    TD_L | TD_CP | TD_CV | TD_P | TD_W;
		}
		csa->csa_state = 0;
		csa->csa_pcpu = pc->pc_addr;
		csa->csa_mid = pc->pc_mid;
		s = intr_disable();
		while (csa->csa_state != CPU_BOOTSTRAP)
			;
		intr_restore(s);
	}

	membar(StoreLoad);
	/* Releasing csa_count lets the APs fall into the scheduler. */
	csa->csa_count = 0;
	smp_started = 1;
}

/*
 * AP-side bootstrap: called on each secondary cpu once it is running in the
 * kernel.  Finishes per-cpu setup, checks in by decrementing csa_count and
 * setting CPU_BOOTSTRAP, waits for all cpus, then enters the scheduler.
 * Does not return.
 */
void
cpu_mp_bootstrap(struct pcpu *pc)
{
	volatile struct cpu_start_args *csa;

	csa = &cpu_start_args;
	pmap_map_tsb();
	cpu_setregs(pc);
	tick_start();

	smp_cpus++;
	KASSERT(curthread != NULL, ("cpu_mp_bootstrap: curthread"));
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	csa->csa_count--;
	membar(StoreLoad);
	csa->csa_state = CPU_BOOTSTRAP;
	/* Wait until cpu_mp_unleash() releases everyone together. */
	while (csa->csa_count != 0)
		;

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);
	spinlock_exit();
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
	cpu_throw(NULL, choosethread());	/* doesn't return */
}

/*
 * Shut down the other cpus: flag them in shutdown_cpus, stop any that are
 * still running, and busy-wait (bounded) for each to return itself to the
 * firmware via cpu_ipi_stop()/sun4u_stopself().
 */
void
cpu_mp_shutdown(void)
{
	int i;

	critical_enter();
	shutdown_cpus = PCPU_GET(other_cpus);
	if (stopped_cpus != PCPU_GET(other_cpus))	/* XXX */
		stop_cpus(stopped_cpus ^ PCPU_GET(other_cpus));
	i = 0;
	while (shutdown_cpus != 0) {
		if (i++ > 100000) {
			printf("timeout shutting down CPUs.\n");
			break;
		}
	}
	/* XXX: delay a bit to allow the CPUs to actually enter the PROM. */
	DELAY(100000);
	critical_exit();
}

/* AST IPI: the trap return path does the actual work; nothing to do here. */
static void
cpu_ipi_ast(struct trapframe *tf)
{
}

/*
 * Stop IPI handler: save this cpu's context into stoppcbs[] (presumably so
 * its state is available after a panic/dump -- NOTE(review): confirm against
 * the dump code), mark ourselves stopped, and spin until restarted.  If a
 * shutdown was requested while parked, return the cpu to the firmware.
 */
static void
cpu_ipi_stop(struct trapframe *tf)
{

	CTR1(KTR_SMP, "cpu_ipi_stop: stopped %d", PCPU_GET(cpuid));
	savectx(&stoppcbs[PCPU_GET(cpuid)]);
	atomic_set_acq_int(&stopped_cpus, PCPU_GET(cpumask));
	while ((started_cpus & PCPU_GET(cpumask)) == 0) {
		if ((shutdown_cpus & PCPU_GET(cpumask)) != 0) {
			atomic_clear_int(&shutdown_cpus, PCPU_GET(cpumask));
			sun4u_stopself();
		}
	}
	atomic_clear_rel_int(&started_cpus, PCPU_GET(cpumask));
	atomic_clear_rel_int(&stopped_cpus, PCPU_GET(cpumask));
	CTR1(KTR_SMP, "cpu_ipi_stop: restarted %d", PCPU_GET(cpuid));
}

/* Deliver the dispatch data d0-d2 to every cpu in the 'cpus' mask. */
void
cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
{
	struct pcpu *pc;
	u_int cpu;

	while (cpus) {
		cpu = ffs(cpus) - 1;
		cpus &= ~(1 << cpu);
		pc = pcpu_find(cpu);
		cpu_ipi_send(pc->pc_mid, d0, d1, d2);
	}
}

/*
 * Low-level IPI dispatch to module 'mid' via the interrupt dispatch
 * registers, retrying up to IPI_RETRIES times on NACK.  Panics (or just
 * complains when panicking/in the debugger already) if the target never
 * accepts.
 */
void
cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2)
{
	u_long s;
	int i;

	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
	    ("cpu_ipi_send: outstanding dispatch"));
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		stxa(AA_INTR_SEND | (mid << 14), ASI_SDB_INTR_W, 0);
		membar(Sync);
		/* Wait for the dispatch to complete, then check for NACK. */
		while (ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY)
			;
		intr_restore(s);
		if ((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_NACK) == 0)
			return;
	}
	if (
#ifdef KDB
	    kdb_active ||
#endif
	    panicstr != NULL)
		printf("cpu_ipi_send: couldn't send ipi to module %u\n", mid);
	else
		panic("cpu_ipi_send: couldn't send ipi");
}

void
ipi_selected(u_int cpus, u_int ipi)
{

	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}

/* Broadcast to all cpus including self: unimplemented on sparc64. */
void
ipi_all(u_int ipi)
{

	panic("ipi_all");
}

void
ipi_all_but_self(u_int ipi)
{

	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
}