Index: head/sys/mips/cavium/octeon_mp.c =================================================================== --- head/sys/mips/cavium/octeon_mp.c (revision 305620) +++ head/sys/mips/cavium/octeon_mp.c (revision 305621) @@ -1,146 +1,154 @@ /*- * Copyright (c) 2004-2010 Juli Mallett * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include unsigned octeon_ap_boot = ~0; void platform_ipi_send(int cpuid) { cvmx_write_csr(CVMX_CIU_MBOX_SETX(cpuid), 1); mips_wbflush(); } void platform_ipi_clear(void) { uint64_t action; action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(PCPU_GET(cpuid))); KASSERT(action == 1, ("unexpected IPIs: %#jx", (uintmax_t)action)); cvmx_write_csr(CVMX_CIU_MBOX_CLRX(PCPU_GET(cpuid)), action); } int -platform_ipi_intrnum(void) +platform_ipi_hardintr_num(void) { + return (1); } +int +platform_ipi_softintr_num(void) +{ + + return (-1); +} + void platform_init_ap(int cpuid) { unsigned ciu_int_mask, clock_int_mask, ipi_int_mask; /* * Set the exception base. */ mips_wr_ebase(0x80000000); /* * Clear any pending IPIs. */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cpuid), 0xffffffff); /* * Set up interrupts. */ octeon_ciu_reset(); /* * Unmask the clock, ipi and ciu interrupts. */ ciu_int_mask = hard_int_mask(0); clock_int_mask = hard_int_mask(5); - ipi_int_mask = hard_int_mask(platform_ipi_intrnum()); + ipi_int_mask = hard_int_mask(platform_ipi_hardintr_num()); set_intr_mask(ciu_int_mask | clock_int_mask | ipi_int_mask); mips_wbflush(); } void platform_cpu_mask(cpuset_t *mask) { uint64_t core_mask = cvmx_sysinfo_get()->core_mask; uint64_t i, m; CPU_ZERO(mask); for (i = 0, m = 1 ; i < MAXCPU; i++, m <<= 1) if (core_mask & m) CPU_SET(i, mask); } struct cpu_group * platform_smp_topo(void) { return (smp_topo_none()); } int platform_start_ap(int cpuid) { uint64_t cores_in_reset; /* * Release the core if it is in reset, and let it rev up a bit. * The real synchronization happens below via octeon_ap_boot. 
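 * (The BSP publishes the target cpuid in octeon_ap_boot and then spins
 * until the AP zeroes it from its bootstrap path.)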
*/ cores_in_reset = cvmx_read_csr(CVMX_CIU_PP_RST); if (cores_in_reset & (1ULL << cpuid)) { if (bootverbose) printf ("AP #%d still in reset\n", cpuid); cores_in_reset &= ~(1ULL << cpuid); cvmx_write_csr(CVMX_CIU_PP_RST, (uint64_t)(cores_in_reset)); DELAY(2000); /* Give it a moment to start */ } if (atomic_cmpset_32(&octeon_ap_boot, ~0, cpuid) == 0) return (-1); for (;;) { DELAY(1000); if (atomic_cmpset_32(&octeon_ap_boot, 0, ~0) != 0) return (0); printf("Waiting for cpu%d to start\n", cpuid); } } Index: head/sys/mips/gxemul/gxemul_machdep.c =================================================================== --- head/sys/mips/gxemul/gxemul_machdep.c (revision 305620) +++ head/sys/mips/gxemul/gxemul_machdep.c (revision 305621) @@ -1,237 +1,245 @@ /*- * Copyright (c) 2006 Wojciech A. Koszek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #include #endif #include extern int *edata; extern int *end; void platform_cpu_init() { /* Nothing special */ } static void mips_init(void) { int i; for (i = 0; i < 10; i++) { phys_avail[i] = 0; } /* phys_avail regions are in bytes */ phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end); phys_avail[1] = ctob(realmem); dump_avail[0] = phys_avail[0]; dump_avail[1] = phys_avail[1]; physmem = realmem; init_param1(); init_param2(physmem); mips_cpu_init(); pmap_bootstrap(); mips_proc0_init(); mutex_init(); kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } /* * Perform a board-level soft-reset. * * XXXRW: Does gxemul have a moral equivalent to board-level reset? 
*/ void platform_reset(void) { panic("%s: not yet", __func__); } void platform_start(__register_t a0, __register_t a1, __register_t a2, __register_t a3) { vm_offset_t kernend; uint64_t platform_counter_freq; int argc = a0; char **argv = (char **)a1; char **envp = (char **)a2; int i; /* clear the BSS and SBSS segments */ kernend = (vm_offset_t)&end; memset(&edata, 0, kernend - (vm_offset_t)(&edata)); mips_postboot_fixup(); mips_pcpu0_init(); /* * XXXRW: Support for the gxemul real-time clock required in order to * usefully determine our emulated timer frequency. Go with something * classic as the default in the mean time. */ platform_counter_freq = MIPS_DEFAULT_HZ; mips_timer_early_init(platform_counter_freq); cninit(); printf("entry: platform_start()\n"); bootverbose = 1; if (bootverbose) { printf("cmd line: "); for (i = 0; i < argc; i++) printf("%s ", argv[i]); printf("\n"); if (envp != NULL) { printf("envp:\n"); for (i = 0; envp[i]; i += 2) printf("\t%s = %s\n", envp[i], envp[i+1]); } else { printf("no envp.\n"); } } realmem = btoc(GXEMUL_MP_DEV_READ(GXEMUL_MP_DEV_MEMORY)); mips_init(); mips_timer_init_params(platform_counter_freq, 0); } #ifdef SMP void platform_ipi_send(int cpuid) { GXEMUL_MP_DEV_WRITE(GXEMUL_MP_DEV_IPI_ONE, (1 << 16) | cpuid); } void platform_ipi_clear(void) { GXEMUL_MP_DEV_WRITE(GXEMUL_MP_DEV_IPI_READ, 0); } int -platform_ipi_intrnum(void) +platform_ipi_hardintr_num(void) { + return (GXEMUL_MP_DEV_IPI_INTERRUPT - 2); } +int +platform_ipi_softintr_num(void) +{ + + return (-1); +} + struct cpu_group * platform_smp_topo(void) { return (smp_topo_none()); } void platform_init_ap(int cpuid) { int ipi_int_mask, clock_int_mask; /* * Unmask the clock and ipi interrupts. */ clock_int_mask = hard_int_mask(5); - ipi_int_mask = hard_int_mask(platform_ipi_intrnum()); + ipi_int_mask = hard_int_mask(platform_ipi_hardintr_num()); set_intr_mask(ipi_int_mask | clock_int_mask); } void platform_cpu_mask(cpuset_t *mask) { unsigned i, n; n = GXEMUL_MP_DEV_READ(GXEMUL_MP_DEV_NCPUS); CPU_ZERO(mask); for (i = 0; i < n; i++) CPU_SET(i, mask); } int platform_processor_id(void) { return (GXEMUL_MP_DEV_READ(GXEMUL_MP_DEV_WHOAMI)); } int platform_start_ap(int cpuid) { GXEMUL_MP_DEV_WRITE(GXEMUL_MP_DEV_STARTADDR, (intptr_t)mpentry); GXEMUL_MP_DEV_WRITE(GXEMUL_MP_DEV_START, cpuid); return (0); } #endif /* SMP */ Index: head/sys/mips/include/hwfunc.h =================================================================== --- head/sys/mips/include/hwfunc.h (revision 305620) +++ head/sys/mips/include/hwfunc.h (revision 305621) @@ -1,101 +1,102 @@ /*- * Copyright (c) 2003-2004 Juli Mallett. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_HWFUNC_H_
#define _MACHINE_HWFUNC_H_

#include

struct timecounter;

/*
 * Hooks downward into platform functionality.
 */
void platform_reset(void);
void platform_start(__register_t, __register_t, __register_t, __register_t);

/* For clocks and ticks and such */
void platform_initclocks(void);
uint64_t platform_get_frequency(void);
unsigned platform_get_timecount(struct timecounter *);

/* For hardware-specific CPU initialization */
void platform_cpu_init(void);

#ifdef SMP

/*
 * Spin up the AP so that it starts executing the MP bootstrap entry point,
 * mpentry.
 *
 * Returns 0 on success and non-zero on failure.
 */
int platform_start_ap(int processor_id);

/*
 * Platform-specific initialization that needs to be done when an AP starts
 * running. This function is called from the MP bootstrap code in mpboot.S.
 */
void platform_init_ap(int processor_id);

/*
 * Return the platform-specific interrupt number that is used to deliver
 * IPIs, as either a hard interrupt or a soft interrupt. A platform that
 * does not use one of the two delivery methods returns -1 from the
 * corresponding function.
 *
 * The interrupt is used to deliver IPIs exclusively and must not be used
 * for any other interrupt source.
 */
-int platform_ipi_intrnum(void);
+int platform_ipi_hardintr_num(void);
+int platform_ipi_softintr_num(void);

/*
 * Trigger an IPI on 'cpuid'.
 */
void platform_ipi_send(int cpuid);

/*
 * Quiesce the IPI interrupt source on the current CPU.
 */
void platform_ipi_clear(void);

/*
 * Return the processor id.
 *
 * Note that this function is called in early boot when the stack is not yet
 * available.
 */
extern int platform_processor_id(void);

/*
 * Return the cpumask of available processors.
 */
extern void platform_cpu_mask(cpuset_t *mask);

/*
 * Return the topology of processors on this platform.
 */
struct cpu_group *platform_smp_topo(void);

#endif /* SMP */

#endif /* !_MACHINE_HWFUNC_H_ */

Index: head/sys/mips/mips/mp_machdep.c
===================================================================
--- head/sys/mips/mips/mp_machdep.c (revision 305620)
+++ head/sys/mips/mips/mp_machdep.c (revision 305621)
@@ -1,358 +1,364 @@
/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct pcb stoppcbs[MAXCPU]; static void *dpcpu; static struct mtx ap_boot_mtx; static volatile int aps_ready; static volatile int mp_naps; static void ipi_send(struct pcpu *pc, int ipi) { CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi); atomic_set_32(&pc->pc_pending_ipis, ipi); platform_ipi_send(pc->pc_cpuid); CTR1(KTR_SMP, "%s: sent", __func__); } void ipi_all_but_self(int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); ipi_selected(other_cpus, ipi); } /* Send an IPI to a set of cpus. */ void ipi_selected(cpuset_t cpus, int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (CPU_ISSET(pc->pc_cpuid, &cpus)) { CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc, ipi); ipi_send(pc, ipi); } } } /* Send an IPI to a specific CPU. */ void ipi_cpu(int cpu, u_int ipi) { CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi); ipi_send(cpuid_to_pcpu[cpu], ipi); } /* * Handle an IPI sent to this processor. */ static int mips_ipi_handler(void *arg) { u_int cpu, ipi, ipi_bitmap; int bit; cpu = PCPU_GET(cpuid); platform_ipi_clear(); /* quiesce the pending ipi interrupt */ ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis)); if (ipi_bitmap == 0) return (FILTER_STRAY); CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap); while ((bit = ffs(ipi_bitmap))) { bit = bit - 1; ipi = 1 << bit; ipi_bitmap &= ~ipi; switch (ipi) { case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add it in the switch. 
*/ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); tlb_save(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } } return (FILTER_HANDLED); } static int start_ap(int cpuid) { int cpus, ms; cpus = mp_naps; dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); mips_sync(); if (platform_start_ap(cpuid) != 0) return (-1); /* could not start AP */ for (ms = 0; ms < 5000; ++ms) { if (mp_naps > cpus) return (0); /* success */ else DELAY(1000); } return (-2); /* timeout initializing AP */ } void cpu_mp_setmaxid(void) { cpuset_t cpumask; int cpu, last; platform_cpu_mask(&cpumask); mp_ncpus = 0; last = 1; while ((cpu = CPU_FFS(&cpumask)) != 0) { last = cpu; cpu--; CPU_CLR(cpu, &cpumask); mp_ncpus++; } if (mp_ncpus <= 0) mp_ncpus = 1; mp_maxid = min(last, MAXCPU) - 1; } void cpu_mp_announce(void) { /* NOTHING */ } struct cpu_group * cpu_topo(void) { return (platform_smp_topo()); } int cpu_mp_probe(void) { return (mp_ncpus > 1); } void cpu_mp_start(void) { int error, cpuid; cpuset_t cpumask; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_ZERO(&all_cpus); platform_cpu_mask(&cpumask); while (!CPU_EMPTY(&cpumask)) { cpuid = CPU_FFS(&cpumask) - 1; CPU_CLR(cpuid, &cpumask); if (cpuid >= MAXCPU) { printf("cpu_mp_start: ignoring AP #%d.\n", cpuid); continue; } if (cpuid != platform_processor_id()) { if ((error = start_ap(cpuid)) != 0) { printf("AP #%d failed to start: %d\n", cpuid, error); continue; } if (bootverbose) printf("AP #%d started!\n", cpuid); } CPU_SET(cpuid, &all_cpus); } } void smp_init_secondary(u_int32_t cpuid) { /* TLB */ mips_wr_wired(0); tlb_invalidate_all(); mips_wr_wired(VMWIRED_ENTRIES); /* * We assume that the L1 cache on the APs is identical to the one * on the BSP. */ mips_dcache_wbinv_all(); mips_icache_sync_all(); mips_sync(); mips_wr_entryhi(0); pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu)); dpcpu_init(dpcpu, cpuid); /* The AP has initialized successfully - allow the BSP to proceed */ ++mp_naps; /* Spin until the BSP is ready to release the APs */ while (!aps_ready) ; /* Initialize curthread. */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); PCPU_SET(curthread, PCPU_GET(idlethread)); mtx_lock_spin(&ap_boot_mtx); smp_cpus++; CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid)); if (bootverbose) printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid)); if (smp_cpus == mp_ncpus) { atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); while (smp_started == 0) ; /* nothing */ /* Start per-CPU event timers. 
*/ cpu_initclocks_ap(); /* enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } static void release_aps(void *dummy __unused) { int ipi_irq; if (mp_ncpus == 1) return; /* * IPI handler */ - ipi_irq = platform_ipi_intrnum(); - cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq, - INTR_TYPE_MISC | INTR_EXCL, NULL); + ipi_irq = platform_ipi_hardintr_num(); + if (ipi_irq != -1) { + cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, + ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL); + } else { + ipi_irq = platform_ipi_softintr_num(); + cpu_establish_softintr("ipi", mips_ipi_handler, NULL, NULL, + ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL); + } atomic_store_rel_int(&aps_ready, 1); while (smp_started == 0) ; /* nothing */ } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); Index: head/sys/mips/nlm/xlp_machdep.c =================================================================== --- head/sys/mips/nlm/xlp_machdep.c (revision 305620) +++ head/sys/mips/nlm/xlp_machdep.c (revision 305621) @@ -1,734 +1,741 @@ /*- * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights * reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * NETLOGIC_BSD */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include /* cinit() */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif /* 4KB static data aread to keep a copy of the bootload env until the dynamic kenv is setup */ char boot1_env[4096]; uint64_t xlp_cpu_frequency; uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE); int xlp_ncores; int xlp_threads_per_core; uint32_t xlp_hw_thread_mask; int xlp_cpuid_to_hwtid[MAXCPU]; int xlp_hwtid_to_cpuid[MAXCPU]; uint64_t xlp_pic_base; static int xlp_mmuval; extern uint32_t _end; extern char XLPResetEntry[], XLPResetEntryEnd[]; static void xlp_setup_core(void) { uint64_t reg; reg = nlm_mfcr(LSU_DEFEATURE); /* Enable Unaligned and L2HPE */ reg |= (1 << 30) | (1 << 23); /* * Experimental : Enable SUE * Speculative Unmap Enable. Enable speculative L2 cache request for * unmapped access. */ reg |= (1ull << 31); /* Clear S1RCM - A0 errata */ reg &= ~0xeull; nlm_mtcr(LSU_DEFEATURE, reg); reg = nlm_mfcr(SCHED_DEFEATURE); /* Experimental: Disable BRU accepting ALU ops - A0 errata */ reg |= (1 << 24); nlm_mtcr(SCHED_DEFEATURE, reg); } static void xlp_setup_mmu(void) { uint32_t pagegrain; if (nlm_threadid() == 0) { nlm_setup_extended_pagemask(0); nlm_large_variable_tlb_en(1); nlm_extended_tlb_en(1); nlm_mmu_setup(0, 0, 0); } /* Enable no-read, no-exec, large-physical-address */ pagegrain = mips_rd_pagegrain(); pagegrain |= (1U << 31) | /* RIE */ (1 << 30) | /* XIE */ (1 << 29); /* ELPA */ mips_wr_pagegrain(pagegrain); } static void xlp_enable_blocks(void) { uint64_t sysbase; int i; for (i = 0; i < XLP_MAX_NODES; i++) { if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i))) continue; sysbase = nlm_get_sys_regbase(i); nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA); } } static void xlp_parse_mmu_options(void) { uint64_t sysbase; uint32_t cpu_map = xlp_hw_thread_mask; uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask; int i, j, k; #ifdef SMP if (cpu_map == 0) cpu_map = 0xffffffff; #else /* Uniprocessor! 
*/ if (cpu_map == 0) cpu_map = 0x1; else if (cpu_map != 0x1) { printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n" "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map); cpu_map = 0x1; } #endif xlp_ncores = 1; core0_thr_mask = cpu_map & 0xf; switch (core0_thr_mask) { case 1: xlp_threads_per_core = 1; xlp_mmuval = 0; break; case 3: xlp_threads_per_core = 2; xlp_mmuval = 2; break; case 0xf: xlp_threads_per_core = 4; xlp_mmuval = 3; break; default: goto unsupp; } /* Try to find the enabled cores from SYS block */ sysbase = nlm_get_sys_regbase(0); cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff; /* XLP 416 does not report this correctly, fix */ if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416) cpu_rst_mask = 0xe; /* Take out cores which do not exist on chip */ for (i = 1; i < XLP_MAX_CORES; i++) { if ((cpu_rst_mask & (1 << i)) == 0) cpu_map &= ~(0xfu << (4 * i)); } /* Verify other cores' CPU masks */ for (i = 1; i < XLP_MAX_CORES; i++) { core_thr_mask = (cpu_map >> (4 * i)) & 0xf; if (core_thr_mask == 0) continue; if (core_thr_mask != core0_thr_mask) goto unsupp; xlp_ncores++; } xlp_hw_thread_mask = cpu_map; /* setup hardware processor id to cpu id mapping */ for (i = 0; i< MAXCPU; i++) xlp_cpuid_to_hwtid[i] = xlp_hwtid_to_cpuid[i] = -1; for (i = 0, k = 0; i < XLP_MAX_CORES; i++) { if (((cpu_map >> (i * 4)) & 0xf) == 0) continue; for (j = 0; j < xlp_threads_per_core; j++) { xlp_cpuid_to_hwtid[k] = i * 4 + j; xlp_hwtid_to_cpuid[i * 4 + j] = k; k++; } } return; unsupp: printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n" "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n", (u_long)core0_thr_mask, (u_long)cpu_map); panic("Invalid CPU mask - halting.\n"); return; } /* Parse cmd line args as env - copied from ar71xx */ static void xlp_parse_bootargs(char *cmdline) { char *n, *v; while ((v = strsep(&cmdline, " \n")) != NULL) { if (*v == '\0') continue; if (*v == '-') { while (*v != '\0') { v++; switch (*v) { case 'a': boothowto |= RB_ASKNAME; break; case 'd': boothowto |= RB_KDB; break; case 'g': boothowto |= RB_GDB; break; case 's': boothowto |= RB_SINGLE; break; case 'v': boothowto |= RB_VERBOSE; break; } } } else { n = strsep(&v, "="); if (v == NULL) kern_setenv(n, "1"); else kern_setenv(n, v); } } } #ifdef FDT static void xlp_bootargs_init(__register_t arg) { char buf[2048]; /* early stack is big enough */ void *dtbp; phandle_t chosen; ihandle_t mask; dtbp = (void *)(intptr_t)arg; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not passed as argument try * to use the statically embedded one. 
*/
*/ if (dtbp == NULL) dtbp = &fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) while (1); if (OF_init((void *)dtbp) != 0) while (1); OF_interpret("perform-fixup", 0); chosen = OF_finddevice("/chosen"); if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) { xlp_hw_thread_mask = mask; } if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1) xlp_parse_bootargs(buf); } #else /* * arg is a pointer to the environment block, the format of the block is * a=xyz\0b=pqr\0\0 */ static void xlp_bootargs_init(__register_t arg) { char buf[2048]; /* early stack is big enough */ char *p, *v, *n; uint32_t mask; /* * provide backward compat for passing cpu mask as arg */ if (arg & 1) { xlp_hw_thread_mask = arg; return; } p = (void *)(intptr_t)arg; while (*p != '\0') { strlcpy(buf, p, sizeof(buf)); v = buf; n = strsep(&v, "="); if (v == NULL) kern_setenv(n, "1"); else kern_setenv(n, v); p += strlen(p) + 1; } /* CPU mask can be passed thru env */ if (getenv_uint("cpumask", &mask) != 0) xlp_hw_thread_mask = mask; /* command line argument */ v = kern_getenv("bootargs"); if (v != NULL) { strlcpy(buf, v, sizeof(buf)); xlp_parse_bootargs(buf); freeenv(v); } } #endif static void mips_init(void) { init_param1(); init_param2(physmem); mips_cpu_init(); cpuinfo.cache_coherent_dma = TRUE; pmap_bootstrap(); mips_proc0_init(); mutex_init(); #ifdef DDB kdb_init(); if (boothowto & RB_KDB) { kdb_enter("Boot flags requested debugger", NULL); } #endif } unsigned int platform_get_timecount(struct timecounter *tc __unused) { uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER); return (unsigned int)~count; } static void xlp_pic_init(void) { struct timecounter pic_timecounter = { platform_get_timecount, /* get_timecount */ 0, /* no poll_pps */ ~0U, /* counter_mask */ XLP_IO_CLK, /* frequency */ "XLRPIC", /* name */ 2000, /* quality (adjusted in code) */ }; int i; int maxirt; xlp_pic_base = nlm_get_pic_regbase(0); /* TOOD: Add other nodes */ maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()), XLP_PCI_DEVINFO_REG0); printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base, maxirt); /* Bind all PIC irqs to cpu 0 */ for (i = 0; i < maxirt; i++) nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0, 1, 0, 0x1); nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0); platform_timecounter = &pic_timecounter; } #if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */ #ifdef XLP_SIM #define XLP_MEM_LIM 0x200000000ULL #else #define XLP_MEM_LIM 0x10000000000ULL #endif #else #define XLP_MEM_LIM 0xfffff000UL #endif static vm_paddr_t xlp_mem_excl[] = { 0, 0, /* for kernel image region, see xlp_mem_init */ 0x0c000000, 0x14000000, /* uboot area, cms queue and other stuff */ 0x1fc00000, 0x1fd00000, /* reset vec */ 0x1e000000, 0x1e200000, /* poe buffers */ }; static int mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend) { int i, pos; pos = 0; for (i = 0; i < nitems(xlp_mem_excl); i += 2) { if (mstart > xlp_mem_excl[i + 1]) continue; if (mstart < xlp_mem_excl[i]) { avail[pos++] = mstart; if (mend < xlp_mem_excl[i]) avail[pos++] = mend; else avail[pos++] = xlp_mem_excl[i]; } mstart = xlp_mem_excl[i + 1]; if (mend <= mstart) break; } if (mstart < mend) { avail[pos++] = mstart; avail[pos++] = mend; } return (pos); } static void xlp_mem_init(void) { vm_paddr_t physsz, tmp; uint64_t bridgebase, base, lim, val; int i, j, k, n; /* update kernel image area in exclude regions */ tmp = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end); tmp = round_page(tmp) + 0x20000; /* round up */ 
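/*
 * Entries 0/1 of xlp_mem_excl are reserved for the kernel image; record its
 * rounded-up end so mem_exclude_add() keeps that range out of phys_avail.
 */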
xlp_mem_excl[1] = tmp; printf("Memory (from DRAM BARs):\n"); bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */ physsz = 0; for (i = 0, j = 0; i < 8; i++) { val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i)); val = (val >> 12) & 0xfffff; base = val << 20; val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i)); val = (val >> 12) & 0xfffff; if (val == 0) /* BAR not enabled */ continue; lim = (val + 1) << 20; printf(" BAR %d: %#jx - %#jx : ", i, (intmax_t)base, (intmax_t)lim); if (lim <= base) { printf("\tskipped - malformed %#jx -> %#jx\n", (intmax_t)base, (intmax_t)lim); continue; } else if (base >= XLP_MEM_LIM) { printf(" skipped - outside usable limit %#jx.\n", (intmax_t)XLP_MEM_LIM); continue; } else if (lim >= XLP_MEM_LIM) { lim = XLP_MEM_LIM; printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM); } else printf(" usable\n"); /* exclude unusable regions from BAR and add rest */ n = mem_exclude_add(&phys_avail[j], base, lim); for (k = j; k < j + n; k += 2) { physsz += phys_avail[k + 1] - phys_avail[k]; printf("\tMem[%d]: %#jx - %#jx\n", k/2, (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]); } j = k; } /* setup final entry with 0 */ phys_avail[j] = phys_avail[j + 1] = 0; /* copy phys_avail to dump_avail */ for (i = 0; i <= j + 1; i++) dump_avail[i] = phys_avail[i]; realmem = physmem = btoc(physsz); } void platform_start(__register_t a0 __unused, __register_t a1 __unused, __register_t a2 __unused, __register_t a3 __unused) { /* Initialize pcpu stuff */ mips_pcpu0_init(); /* initialize console so that we have printf */ boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */ init_static_kenv(boot1_env, sizeof(boot1_env)); xlp_bootargs_init(a0); /* clockrate used by delay, so initialize it here */ xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0); cpu_clock = xlp_cpu_frequency / 1000000; mips_timer_early_init(xlp_cpu_frequency); /* Init console please */ cninit(); /* Early core init and fixes for errata */ xlp_setup_core(); xlp_parse_mmu_options(); xlp_mem_init(); bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC, XLPResetEntryEnd - XLPResetEntry); #ifdef SMP /* * We will enable the other threads in core 0 here * so that the TLB and cache info is correct when * mips_init runs */ xlp_enable_threads(xlp_mmuval); #endif /* setup for the startup core */ xlp_setup_mmu(); xlp_enable_blocks(); /* Read/Guess/setup board information */ nlm_board_info_setup(); /* MIPS generic init */ mips_init(); /* * XLP specific post initialization * initialize other on chip stuff */ xlp_pic_init(); mips_timer_init_params(xlp_cpu_frequency, 0); } void platform_cpu_init() { } void platform_reset(void) { uint64_t sysbase = nlm_get_sys_regbase(0); nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1); for( ; ; ) __asm __volatile("wait"); } #ifdef SMP /* * XLP threads are started simultaneously when we enable threads, this will * ensure that the threads are blocked in platform_init_ap, until they are * ready to proceed to smp_init_secondary() */ static volatile int thr_unblock[4]; int platform_start_ap(int cpuid) { uint32_t coremask, val; uint64_t sysbase = nlm_get_sys_regbase(0); int hwtid = xlp_cpuid_to_hwtid[cpuid]; int core, thr; core = hwtid / 4; thr = hwtid % 4; if (thr == 0) { /* First thread in core, do core wake up */ coremask = 1u << core; /* Enable core clock */ val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL); val &= ~coremask; nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val); /* Remove CPU Reset */ val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET); val &= ~coremask & 
0xff; nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val); if (bootverbose) printf("Waking up core %d ...", core); /* Poll for CPU to mark itself coherent */ do { val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE); } while ((val & coremask) != 0); if (bootverbose) printf("Done\n"); } else { /* otherwise release the threads stuck in platform_init_ap */ thr_unblock[thr] = 1; } return (0); } void platform_init_ap(int cpuid) { uint32_t stat; int thr; /* The first thread has to setup the MMU and enable other threads */ thr = nlm_threadid(); if (thr == 0) { xlp_setup_core(); xlp_enable_threads(xlp_mmuval); } else { /* * FIXME busy wait here eats too many cycles, especially * in the core 0 while bootup */ while (thr_unblock[thr] == 0) __asm__ __volatile__ ("nop;nop;nop;nop"); thr_unblock[thr] = 0; } xlp_setup_mmu(); stat = mips_rd_status(); KASSERT((stat & MIPS_SR_INT_IE) == 0, ("Interrupts enabled in %s!", __func__)); stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT; mips_wr_status(stat); nlm_write_c0_eimr(0ull); xlp_enable_irq(IRQ_IPI); xlp_enable_irq(IRQ_TIMER); xlp_enable_irq(IRQ_MSGRING); return; } int -platform_ipi_intrnum(void) +platform_ipi_hardintr_num(void) { return (IRQ_IPI); } +int +platform_ipi_softintr_num(void) +{ + + return (-1); +} + void platform_ipi_send(int cpuid) { nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid], - platform_ipi_intrnum(), 0); + platform_ipi_hardintr_num(), 0); } void platform_ipi_clear(void) { } int platform_processor_id(void) { return (xlp_hwtid_to_cpuid[nlm_cpuid()]); } void platform_cpu_mask(cpuset_t *mask) { int i, s; CPU_ZERO(mask); s = xlp_ncores * xlp_threads_per_core; for (i = 0; i < s; i++) CPU_SET(i, mask); } struct cpu_group * platform_smp_topo() { return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1, xlp_threads_per_core, CG_FLAG_THREAD)); } #endif Index: head/sys/mips/rmi/xlr_machdep.c =================================================================== --- head/sys/mips/rmi/xlr_machdep.c (revision 305620) +++ head/sys/mips/rmi/xlr_machdep.c (revision 305621) @@ -1,617 +1,624 @@ /*- * Copyright (c) 2006-2009 RMI Corporation * Copyright (c) 2002-2004 Juli Mallett * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include /* cinit() */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void mpwait(void); unsigned long xlr_io_base = (unsigned long)(DEFAULT_XLR_IO_BASE); /* 4KB static data aread to keep a copy of the bootload env until the dynamic kenv is setup */ char boot1_env[4096]; int rmi_spin_mutex_safe=0; struct mtx xlr_pic_lock; /* * Parameters from boot loader */ struct boot1_info xlr_boot1_info; int xlr_run_mode; int xlr_argc; int32_t *xlr_argv, *xlr_envp; uint64_t cpu_mask_info; uint32_t xlr_online_cpumask; uint32_t xlr_core_cpu_mask = 0x1; /* Core 0 thread 0 is always there */ int xlr_shtlb_enabled; int xlr_ncores; int xlr_threads_per_core; uint32_t xlr_hw_thread_mask; int xlr_cpuid_to_hwtid[MAXCPU]; int xlr_hwtid_to_cpuid[MAXCPU]; static void xlr_setup_mmu_split(void) { uint64_t mmu_setup; int val = 0; if (xlr_threads_per_core == 4 && xlr_shtlb_enabled == 0) return; /* no change from boot setup */ switch (xlr_threads_per_core) { case 1: val = 0; break; case 2: val = 2; break; case 4: val = 3; break; } mmu_setup = read_xlr_ctrl_register(4, 0); mmu_setup = mmu_setup & ~0x06; mmu_setup |= (val << 1); /* turn on global mode */ if (xlr_shtlb_enabled) mmu_setup |= 0x01; write_xlr_ctrl_register(4, 0, mmu_setup); } static void xlr_parse_mmu_options(void) { #ifdef notyet char *hw_env, *start, *end; #endif uint32_t cpu_map; uint8_t core0_thr_mask, core_thr_mask; int i, j, k; /* First check for the shared TLB setup */ xlr_shtlb_enabled = 0; #ifdef notyet /* * We don't support sharing TLB per core - TODO */ xlr_shtlb_enabled = 0; if ((hw_env = kern_getenv("xlr.shtlb")) != NULL) { start = hw_env; tmp = strtoul(start, &end, 0); if (start != end) xlr_shtlb_enabled = (tmp != 0); else printf("Bad value for xlr.shtlb [%s]\n", hw_env); freeenv(hw_env); } #endif /* * XLR supports splitting the 64 TLB entries across one, two or four * threads (split mode). XLR also allows the 64 TLB entries to be shared * across all threads in the core using a global flag (shared TLB mode). * We will support 1/2/4 threads in split mode or shared mode. * */ xlr_ncores = 1; cpu_map = xlr_boot1_info.cpu_online_map; #ifndef SMP /* Uniprocessor! 
*/ if (cpu_map != 0x1) { printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n" "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map); cpu_map = 0x1; } #endif core0_thr_mask = cpu_map & 0xf; switch (core0_thr_mask) { case 1: xlr_threads_per_core = 1; break; case 3: xlr_threads_per_core = 2; break; case 0xf: xlr_threads_per_core = 4; break; default: goto unsupp; } /* Verify other cores CPU masks */ for (i = 1; i < XLR_MAX_CORES; i++) { core_thr_mask = (cpu_map >> (i*4)) & 0xf; if (core_thr_mask) { if (core_thr_mask != core0_thr_mask) goto unsupp; xlr_ncores++; } } xlr_hw_thread_mask = cpu_map; /* setup hardware processor id to cpu id mapping */ for (i = 0; i< MAXCPU; i++) xlr_cpuid_to_hwtid[i] = xlr_hwtid_to_cpuid [i] = -1; for (i = 0, k = 0; i < XLR_MAX_CORES; i++) { if (((cpu_map >> (i*4)) & 0xf) == 0) continue; for (j = 0; j < xlr_threads_per_core; j++) { xlr_cpuid_to_hwtid[k] = i*4 + j; xlr_hwtid_to_cpuid[i*4 + j] = k; k++; } } /* setup for the startup core */ xlr_setup_mmu_split(); return; unsupp: printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n" "\tcore0 thread mask [%lx], boot cpu mask [%lx]\n" "\tUsing default, 16 TLB entries per CPU, split mode\n", (u_long)core0_thr_mask, (u_long)cpu_map); panic("Invalid CPU mask - halting.\n"); return; } static void xlr_set_boot_flags(void) { char *p; p = kern_getenv("bootflags"); if (p == NULL) p = kern_getenv("boot_flags"); /* old style */ if (p == NULL) return; for (; p && *p != '\0'; p++) { switch (*p) { case 'd': case 'D': boothowto |= RB_KDB; break; case 'g': case 'G': boothowto |= RB_GDB; break; case 'v': case 'V': boothowto |= RB_VERBOSE; break; case 's': /* single-user (default, supported for sanity) */ case 'S': boothowto |= RB_SINGLE; break; default: printf("Unrecognized boot flag '%c'.\n", *p); break; } } freeenv(p); return; } extern uint32_t _end; static void mips_init(void) { init_param1(); init_param2(physmem); mips_cpu_init(); cpuinfo.cache_coherent_dma = TRUE; pmap_bootstrap(); #ifdef DDB kdb_init(); if (boothowto & RB_KDB) { kdb_enter("Boot flags requested debugger", NULL); } #endif mips_proc0_init(); mutex_init(); } u_int platform_get_timecount(struct timecounter *tc __unused) { return (0xffffffffU - pic_timer_count32(PIC_CLOCK_TIMER)); } static void xlr_pic_init(void) { struct timecounter pic_timecounter = { platform_get_timecount, /* get_timecount */ 0, /* no poll_pps */ ~0U, /* counter_mask */ PIC_TIMER_HZ, /* frequency */ "XLRPIC", /* name */ 2000, /* quality (adjusted in code) */ }; xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); int i, irq; write_c0_eimr64(0ULL); mtx_init(&xlr_pic_lock, "pic", NULL, MTX_SPIN); xlr_write_reg(mmio, PIC_CTRL, 0); /* Initialize all IRT entries */ for (i = 0; i < PIC_NUM_IRTS; i++) { irq = PIC_INTR_TO_IRQ(i); /* * Disable all IRTs. 
Set defaults (local scheduling, high * polarity, level * triggered, and CPU irq) */ xlr_write_reg(mmio, PIC_IRT_1(i), (1 << 30) | (1 << 6) | irq); /* Bind all PIC irqs to cpu 0 */ xlr_write_reg(mmio, PIC_IRT_0(i), 0x01); } /* Setup timer 7 of PIC as a timestamp, no interrupts */ pic_init_timer(PIC_CLOCK_TIMER); pic_set_timer(PIC_CLOCK_TIMER, ~UINT64_C(0)); platform_timecounter = &pic_timecounter; } static void xlr_mem_init(void) { struct xlr_boot1_mem_map *boot_map; vm_size_t physsz = 0; int i, j; /* get physical memory info from boot loader */ boot_map = (struct xlr_boot1_mem_map *) (unsigned long)xlr_boot1_info.psb_mem_map; for (i = 0, j = 0; i < boot_map->num_entries; i++, j += 2) { if (boot_map->physmem_map[i].type != BOOT1_MEM_RAM) continue; if (j == 14) { printf("*** ERROR *** memory map too large ***\n"); break; } if (j == 0) { /* start after kernel end */ phys_avail[0] = (vm_paddr_t) MIPS_KSEG0_TO_PHYS(&_end) + 0x20000; /* boot loader start */ /* HACK to Use bootloaders memory region */ if (boot_map->physmem_map[0].size == 0x0c000000) { boot_map->physmem_map[0].size = 0x0ff00000; } phys_avail[1] = boot_map->physmem_map[0].addr + boot_map->physmem_map[0].size; printf("First segment: addr:%#jx -> %#jx \n", (uintmax_t)phys_avail[0], (uintmax_t)phys_avail[1]); dump_avail[0] = phys_avail[0]; dump_avail[1] = phys_avail[1]; } else { #if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */ /* * In 32 bit physical address mode we cannot use * mem > 0xffffffff */ if (boot_map->physmem_map[i].addr > 0xfffff000U) { printf("Memory: start %#jx size %#jx ignored" "(>4GB)\n", (intmax_t)boot_map->physmem_map[i].addr, (intmax_t)boot_map->physmem_map[i].size); continue; } if (boot_map->physmem_map[i].addr + boot_map->physmem_map[i].size > 0xfffff000U) { boot_map->physmem_map[i].size = 0xfffff000U - boot_map->physmem_map[i].addr; printf("Memory: start %#jx limited to 4GB\n", (intmax_t)boot_map->physmem_map[i].addr); } #endif /* !PHYSADDR_64_BIT */ phys_avail[j] = (vm_paddr_t) boot_map->physmem_map[i].addr; phys_avail[j + 1] = phys_avail[j] + boot_map->physmem_map[i].size; printf("Next segment : addr:%#jx -> %#jx\n", (uintmax_t)phys_avail[j], (uintmax_t)phys_avail[j+1]); } dump_avail[j] = phys_avail[j]; dump_avail[j+1] = phys_avail[j+1]; physsz += boot_map->physmem_map[i].size; } phys_avail[j] = phys_avail[j + 1] = 0; realmem = physmem = btoc(physsz); } void platform_start(__register_t a0 __unused, __register_t a1 __unused, __register_t a2 __unused, __register_t a3 __unused) { int i; #ifdef SMP uint32_t tmp; void (*wakeup) (void *, void *, unsigned int); #endif /* Save boot loader and other stuff from scratch regs */ xlr_boot1_info = *(struct boot1_info *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 0); cpu_mask_info = read_c0_register64(MIPS_COP_0_OSSCRATCH, 1); xlr_online_cpumask = read_c0_register32(MIPS_COP_0_OSSCRATCH, 2); xlr_run_mode = read_c0_register32(MIPS_COP_0_OSSCRATCH, 3); xlr_argc = read_c0_register32(MIPS_COP_0_OSSCRATCH, 4); /* * argv and envp are passed in array of 32bit pointers */ xlr_argv = (int32_t *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 5); xlr_envp = (int32_t *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 6); /* Initialize pcpu stuff */ mips_pcpu0_init(); /* initialize console so that we have printf */ boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */ /* clockrate used by delay, so initialize it here */ cpu_clock = xlr_boot1_info.cpu_frequency / 1000000; /* * Note the time counter on CPU0 runs not at system 
clock speed, but * at PIC time counter speed (which is returned by * platform_get_frequency(). Thus we do not use * xlr_boot1_info.cpu_frequency here. */ mips_timer_early_init(xlr_boot1_info.cpu_frequency); /* Init console please */ cninit(); init_static_kenv(boot1_env, sizeof(boot1_env)); printf("Environment (from %d args):\n", xlr_argc - 1); if (xlr_argc == 1) printf("\tNone\n"); for (i = 1; i < xlr_argc; i++) { char *n, *arg; arg = (char *)(intptr_t)xlr_argv[i]; printf("\t%s\n", arg); n = strsep(&arg, "="); if (arg == NULL) kern_setenv(n, "1"); else kern_setenv(n, arg); } xlr_set_boot_flags(); xlr_parse_mmu_options(); xlr_mem_init(); /* Set up hz, among others. */ mips_init(); #ifdef SMP /* * If thread 0 of any core is not available then mark whole core as * not available */ tmp = xlr_boot1_info.cpu_online_map; for (i = 4; i < MAXCPU; i += 4) { if ((tmp & (0xf << i)) && !(tmp & (0x1 << i))) { /* * Oops.. thread 0 is not available. Disable whole * core */ tmp = tmp & ~(0xf << i); printf("WARNING: Core %d is disabled because thread 0" " of this core is not enabled.\n", i / 4); } } xlr_boot1_info.cpu_online_map = tmp; /* Wakeup Other cpus, and put them in bsd park code. */ wakeup = ((void (*) (void *, void *, unsigned int)) (unsigned long)(xlr_boot1_info.wakeup)); printf("Waking up CPUs 0x%jx.\n", (intmax_t)xlr_boot1_info.cpu_online_map & ~(0x1U)); if (xlr_boot1_info.cpu_online_map & ~(0x1U)) wakeup(mpwait, 0, (unsigned int)xlr_boot1_info.cpu_online_map); #endif /* xlr specific post initialization */ /* initialize other on chip stuff */ xlr_board_info_setup(); xlr_msgring_config(); xlr_pic_init(); xlr_msgring_cpu_init(); mips_timer_init_params(xlr_boot1_info.cpu_frequency, 0); printf("Platform specific startup now completes\n"); } void platform_cpu_init() { } void platform_reset(void) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_GPIO_OFFSET); /* write 1 to GPIO software reset register */ xlr_write_reg(mmio, 8, 1); } #ifdef SMP int xlr_ap_release[MAXCPU]; int platform_start_ap(int cpuid) { int hwid = xlr_cpuid_to_hwtid[cpuid]; if (xlr_boot1_info.cpu_online_map & (1< __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_kdb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #include #endif #ifdef CFE #include #endif #include "sb_scd.h" #ifdef DDB #ifndef KDB #error KDB must be enabled in order for DDB to work! #endif #endif #ifdef CFE_ENV extern void cfe_env_init(void); #endif extern int *edata; extern int *end; extern char MipsTLBMiss[], MipsTLBMissEnd[]; void platform_cpu_init() { /* Nothing special */ } static void sb_intr_init(int cpuid) { int intrnum, intsrc; /* * Disable all sources to the interrupt mapper and setup the mapping * between an interrupt source and the mips hard interrupt number. */ for (intsrc = 0; intsrc < NUM_INTSRC; ++intsrc) { intrnum = sb_route_intsrc(intsrc); sb_disable_intsrc(cpuid, intsrc); sb_write_intmap(cpuid, intsrc, intrnum); #ifdef SMP /* * Set up the mailbox interrupt mapping. * * The mailbox interrupt is "special" in that it is not shared * with any other interrupt source. 
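 * It is therefore mapped directly to the hard interrupt number returned
 * by platform_ipi_hardintr_num() rather than routed via sb_route_intsrc().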
*/ if (intsrc == INTSRC_MAILBOX3) { - intrnum = platform_ipi_intrnum(); + intrnum = platform_ipi_hardintr_num(); sb_write_intmap(cpuid, INTSRC_MAILBOX3, intrnum); sb_enable_intsrc(cpuid, INTSRC_MAILBOX3); } #endif } } static void mips_init(void) { int i, j, cfe_mem_idx, tmp; uint64_t maxmem; #ifdef CFE_ENV cfe_env_init(); #endif TUNABLE_INT_FETCH("boothowto", &boothowto); if (boothowto & RB_VERBOSE) bootverbose++; #ifdef MAXMEM tmp = MAXMEM; #else tmp = 0; #endif TUNABLE_INT_FETCH("hw.physmem", &tmp); maxmem = (uint64_t)tmp * 1024; /* * XXX * If we used vm_paddr_t consistently in pmap, etc., we could * use 64-bit page numbers on !n64 systems, too, like i386 * does with PAE. */ #if !defined(__mips_n64) if (maxmem == 0 || maxmem > 0xffffffff) maxmem = 0xffffffff; #endif #ifdef CFE /* * Query DRAM memory map from CFE. */ physmem = 0; cfe_mem_idx = 0; for (i = 0; i < 10; i += 2) { int result; uint64_t addr, len, type; result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type); if (result < 0) { phys_avail[i] = phys_avail[i + 1] = 0; break; } KASSERT(type == CFE_MI_AVAILABLE, ("CFE DRAM region is not available?")); if (bootverbose) printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len); if (maxmem != 0) { if (addr >= maxmem) { printf("Ignoring %ju bytes of memory at 0x%jx " "that is above maxmem %dMB\n", len, addr, (int)(maxmem / (1024 * 1024))); continue; } if (addr + len > maxmem) { printf("Ignoring %ju bytes of memory " "that is above maxmem %dMB\n", (addr + len) - maxmem, (int)(maxmem / (1024 * 1024))); len = maxmem - addr; } } phys_avail[i] = addr; if (i == 0 && addr == 0) { /* * If this is the first physical memory segment probed * from CFE, omit the region at the start of physical * memory where the kernel has been loaded. */ phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end); } phys_avail[i + 1] = addr + len; physmem += len; } realmem = btoc(physmem); #endif for (j = 0; j < i; j++) dump_avail[j] = phys_avail[j]; physmem = realmem; init_param1(); init_param2(physmem); mips_cpu_init(); /* * Sibyte has a L1 data cache coherent with DMA. This includes * on-chip network interfaces as well as PCI/HyperTransport bus * masters. */ cpuinfo.cache_coherent_dma = TRUE; /* * XXX * The kernel is running in 32-bit mode but the CFE is running in * 64-bit mode. So the SR_KX bit in the status register is turned * on by the CFE every time we call into it - for e.g. CFE_CONSOLE. * * This means that if get a TLB miss for any address above 0xc0000000 * and the SR_KX bit is set then we will end up in the XTLB exception * vector. * * For now work around this by copying the TLB exception handling * code to the XTLB exception vector. 
*/
*/ { bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC, MipsTLBMissEnd - MipsTLBMiss); mips_icache_sync_all(); mips_dcache_wbinv_all(); } pmap_bootstrap(); mips_proc0_init(); mutex_init(); kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } void platform_reset(void) { /* * XXX SMP * XXX flush data caches */ sb_system_reset(); } static void kseg0_map_coherent(void) { uint32_t config; const int CFG_K0_COHERENT = 5; config = mips_rd_config(); config &= ~MIPS_CONFIG_K0_MASK; config |= CFG_K0_COHERENT; mips_wr_config(config); } #ifdef SMP void platform_ipi_send(int cpuid) { KASSERT(cpuid == 0 || cpuid == 1, ("platform_ipi_send: invalid cpuid %d", cpuid)); sb_set_mailbox(cpuid, 1ULL); } void platform_ipi_clear(void) { int cpuid; cpuid = PCPU_GET(cpuid); sb_clear_mailbox(cpuid, 1ULL); } int -platform_ipi_intrnum(void) +platform_ipi_hardintr_num(void) { return (4); } +int +platform_ipi_softintr_num(void) +{ + + return (-1); +} + struct cpu_group * platform_smp_topo(void) { return (smp_topo_none()); } void platform_init_ap(int cpuid) { int ipi_int_mask, clock_int_mask; KASSERT(cpuid == 1, ("AP has an invalid cpu id %d", cpuid)); /* * Make sure that kseg0 is mapped cacheable-coherent */ kseg0_map_coherent(); sb_intr_init(cpuid); /* * Unmask the clock and ipi interrupts. */ clock_int_mask = hard_int_mask(5); - ipi_int_mask = hard_int_mask(platform_ipi_intrnum()); + ipi_int_mask = hard_int_mask(platform_ipi_hardintr_num()); set_intr_mask(ipi_int_mask | clock_int_mask); } int platform_start_ap(int cpuid) { #ifdef CFE int error; if ((error = cfe_cpu_start(cpuid, mpentry, 0, 0, 0))) { printf("cfe_cpu_start error: %d\n", error); return (-1); } else { return (0); } #else return (-1); #endif /* CFE */ } #endif /* SMP */ static u_int sb_get_timecount(struct timecounter *tc) { return ((u_int)sb_zbbus_cycle_count()); } static void sb_timecounter_init(void) { static struct timecounter sb_timecounter = { sb_get_timecount, NULL, ~0u, 0, "sibyte_zbbus_counter", 2000 }; /* * The ZBbus cycle counter runs at half the cpu frequency. */ sb_timecounter.tc_frequency = sb_cpu_speed() / 2; platform_timecounter = &sb_timecounter; } void platform_start(__register_t a0, __register_t a1, __register_t a2, __register_t a3) { /* * Make sure that kseg0 is mapped cacheable-coherent */ kseg0_map_coherent(); /* clear the BSS and SBSS segments */ memset(&edata, 0, (vm_offset_t)&end - (vm_offset_t)&edata); mips_postboot_fixup(); sb_intr_init(0); sb_timecounter_init(); /* Initialize pcpu stuff */ mips_pcpu0_init(); #ifdef CFE /* * Initialize CFE firmware trampolines before * we initialize the low-level console. * * CFE passes the following values in registers: * a0: firmware handle * a2: firmware entry point * a3: entry point seal */ if (a3 == CFE_EPTSEAL) cfe_init(a0, a2); #endif cninit(); mips_init(); mips_timer_init_params(sb_cpu_speed(), 0); } Index: head/sys/mips/sibyte/sb_scd.c =================================================================== --- head/sys/mips/sibyte/sb_scd.c (revision 305620) +++ head/sys/mips/sibyte/sb_scd.c (revision 305621) @@ -1,306 +1,306 @@ /*- * Copyright (c) 2009 Neelkanth Natu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "sb_scd.h" /* * We compile a 32-bit kernel to run on the SB-1 processor which is a 64-bit * processor. It has some registers that must be accessed using 64-bit load * and store instructions. * * We use the mips_ld() and mips_sd() functions to do this for us. */ #define sb_store64(addr, val) mips3_sd((uint64_t *)(uintptr_t)(addr), (val)) #define sb_load64(addr) mips3_ld((uint64_t *)(uintptr_t)(addr)) /* * System Control and Debug (SCD) unit on the Sibyte ZBbus. */ /* * Extract the value starting at bit position 'b' for 'n' bits from 'x'. */ #define GET_VAL_64(x, b, n) (((x) >> (b)) & ((1ULL << (n)) - 1)) #define SYSREV_ADDR MIPS_PHYS_TO_KSEG1(0x10020000) #define SYSREV_NUM_PROCESSORS(x) GET_VAL_64((x), 24, 4) #define SYSCFG_ADDR MIPS_PHYS_TO_KSEG1(0x10020008) #define SYSCFG_PLLDIV(x) GET_VAL_64((x), 7, 5) #define ZBBUS_CYCLE_COUNT_ADDR MIPS_PHYS_TO_KSEG1(0x10030000) #define INTSRC_MASK_ADDR(cpu) \ (MIPS_PHYS_TO_KSEG1(0x10020028) | ((cpu) << 13)) #define INTSRC_MAP_ADDR(cpu, intsrc) \ (MIPS_PHYS_TO_KSEG1(0x10020200) | ((cpu) << 13)) + (intsrc * 8) #define MAILBOX_SET_ADDR(cpu) \ (MIPS_PHYS_TO_KSEG1(0x100200C8) | ((cpu) << 13)) #define MAILBOX_CLEAR_ADDR(cpu) \ (MIPS_PHYS_TO_KSEG1(0x100200D0) | ((cpu) << 13)) static uint64_t sb_read_syscfg(void) { return (sb_load64(SYSCFG_ADDR)); } static void sb_write_syscfg(uint64_t val) { sb_store64(SYSCFG_ADDR, val); } uint64_t sb_zbbus_cycle_count(void) { return (sb_load64(ZBBUS_CYCLE_COUNT_ADDR)); } uint64_t sb_cpu_speed(void) { int plldiv; const uint64_t MHZ = 1000000; plldiv = SYSCFG_PLLDIV(sb_read_syscfg()); if (plldiv == 0) { printf("PLL_DIV is 0 - assuming 6 (300MHz).\n"); plldiv = 6; } return (plldiv * 50 * MHZ); } void sb_system_reset(void) { uint64_t syscfg; const uint64_t SYSTEM_RESET = 1ULL << 60; const uint64_t EXT_RESET = 1ULL << 59; const uint64_t SOFT_RESET = 1ULL << 58; syscfg = sb_read_syscfg(); syscfg &= ~SOFT_RESET; syscfg |= SYSTEM_RESET | EXT_RESET; sb_write_syscfg(syscfg); } void sb_disable_intsrc(int cpu, int src) { int regaddr; uint64_t val; regaddr = INTSRC_MASK_ADDR(cpu); val = sb_load64(regaddr); val |= 1ULL << src; sb_store64(regaddr, val); } void sb_enable_intsrc(int cpu, int src) { int regaddr; uint64_t val; regaddr = INTSRC_MASK_ADDR(cpu); val = sb_load64(regaddr); val &= ~(1ULL << src); sb_store64(regaddr, val); } void sb_write_intsrc_mask(int cpu, uint64_t val) { int regaddr; regaddr = INTSRC_MASK_ADDR(cpu); sb_store64(regaddr, val); } uint64_t 
sb_read_intsrc_mask(int cpu) { int regaddr; uint64_t val; regaddr = INTSRC_MASK_ADDR(cpu); val = sb_load64(regaddr); return (val); } void sb_write_intmap(int cpu, int intsrc, int intrnum) { int regaddr; regaddr = INTSRC_MAP_ADDR(cpu, intsrc); sb_store64(regaddr, intrnum); } int sb_read_intmap(int cpu, int intsrc) { int regaddr; regaddr = INTSRC_MAP_ADDR(cpu, intsrc); return (sb_load64(regaddr) & 0x7); } int sb_route_intsrc(int intsrc) { int intrnum; KASSERT(intsrc >= 0 && intsrc < NUM_INTSRC, ("Invalid interrupt source number (%d)", intsrc)); /* * Interrupt 5 is used by sources internal to the CPU (e.g. timer). * Use a deterministic mapping for the remaining sources. */ #ifdef SMP - KASSERT(platform_ipi_intrnum() == 4, + KASSERT(platform_ipi_hardintr_num() == 4, ("Unexpected interrupt number used for IPI")); intrnum = intsrc % 4; #else intrnum = intsrc % 5; #endif return (intrnum); } #ifdef SMP static uint64_t sb_read_sysrev(void) { return (sb_load64(SYSREV_ADDR)); } void sb_set_mailbox(int cpu, uint64_t val) { int regaddr; regaddr = MAILBOX_SET_ADDR(cpu); sb_store64(regaddr, val); } void sb_clear_mailbox(int cpu, uint64_t val) { int regaddr; regaddr = MAILBOX_CLEAR_ADDR(cpu); sb_store64(regaddr, val); } void platform_cpu_mask(cpuset_t *mask) { int i, s; CPU_ZERO(mask); s = SYSREV_NUM_PROCESSORS(sb_read_sysrev()); for (i = 0; i < s; i++) CPU_SET(i, mask); } #endif /* SMP */ #define SCD_PHYSADDR 0x10000000 #define SCD_SIZE 0x00060000 static int scd_probe(device_t dev) { device_set_desc(dev, "Broadcom/Sibyte System Control and Debug"); return (0); } static int scd_attach(device_t dev) { int rid; struct resource *res; if (bootverbose) device_printf(dev, "attached.\n"); rid = 0; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, SCD_PHYSADDR, SCD_PHYSADDR + SCD_SIZE - 1, SCD_SIZE, 0); if (res == NULL) panic("Cannot allocate resource for system control and debug."); return (0); } static device_method_t scd_methods[] ={ /* Device interface */ DEVMETHOD(device_probe, scd_probe), DEVMETHOD(device_attach, scd_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static driver_t scd_driver = { "scd", scd_methods }; static devclass_t scd_devclass; DRIVER_MODULE(scd, zbbus, scd_driver, scd_devclass, 0, 0);
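For illustration only (not part of this commit): the hook pair introduced by this revision encodes a one-of-two contract that release_aps() in mp_machdep.c relies on. A platform returns a valid interrupt number from exactly one of platform_ipi_hardintr_num() and platform_ipi_softintr_num(), and -1 from the other; the IPI handler is then registered with cpu_establish_hardintr() or cpu_establish_softintr() accordingly. A hypothetical platform whose inter-CPU doorbell arrives on MIPS soft interrupt 1 might implement the pair as sketched below (such a platform would presumably also unmask the line with soft_int_mask() instead of hard_int_mask() in its platform_init_ap()):

int
platform_ipi_hardintr_num(void)
{

	/* No dedicated hard interrupt line for IPIs on this platform. */
	return (-1);
}

int
platform_ipi_softintr_num(void)
{

	/* Hypothetical: IPIs are delivered via soft interrupt 1. */
	return (1);
}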