Index: head/sys/conf/files.powerpc
===================================================================
--- head/sys/conf/files.powerpc	(revision 345425)
+++ head/sys/conf/files.powerpc	(revision 345426)
@@ -1,278 +1,277 @@
# This file tells config what files go into building a kernel,
# files marked standard are always included.
#
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
#
font.h			optional	sc	\
	compile-with	"uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \
	no-obj no-implicit-rule before-depend \
	clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8"
#
# There is only an asm version on ppc64.
cddl/compat/opensolaris/kern/opensolaris_atomic.c	optional zfs powerpc | dtrace powerpc | zfs powerpcspe | dtrace powerpcspe compile-with "${ZFS_C}"
cddl/contrib/opensolaris/common/atomic/powerpc64/opensolaris_atomic.S	optional zfs powerpc64 | dtrace powerpc64 compile-with "${ZFS_S}"
cddl/dev/dtrace/powerpc/dtrace_asm.S	optional dtrace compile-with "${DTRACE_S}"
cddl/dev/dtrace/powerpc/dtrace_subr.c	optional dtrace compile-with "${DTRACE_C}"
cddl/dev/fbt/powerpc/fbt_isa.c	optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
crypto/blowfish/bf_enc.c	optional	crypto | ipsec | ipsec_support
crypto/des/des_enc.c		optional	crypto | ipsec | ipsec_support | netsmb
dev/bm/if_bm.c			optional	bm powermac
dev/adb/adb_bus.c		optional	adb
dev/adb/adb_kbd.c		optional	adb
dev/adb/adb_mouse.c		optional	adb
dev/adb/adb_hb_if.m		optional	adb
dev/adb/adb_if.m		optional	adb
dev/adb/adb_buttons.c		optional	adb
dev/agp/agp_apple.c		optional	agp powermac
dev/fb/fb.c			optional	sc
dev/hwpmc/hwpmc_e500.c		optional	hwpmc
dev/hwpmc/hwpmc_mpc7xxx.c	optional	hwpmc
dev/hwpmc/hwpmc_powerpc.c	optional	hwpmc
dev/hwpmc/hwpmc_ppc970.c	optional	hwpmc
dev/iicbus/ad7417.c		optional	ad7417 powermac
dev/iicbus/adm1030.c		optional	powermac windtunnel | adm1030 powermac
dev/iicbus/adt746x.c		optional	adt746x powermac
dev/iicbus/ds1631.c		optional	ds1631 powermac
dev/iicbus/ds1775.c		optional	ds1775 powermac
dev/iicbus/max6690.c		optional	max6690 powermac
dev/iicbus/ofw_iicbus.c		optional	iicbus aim
dev/ipmi/ipmi.c			optional	ipmi
dev/ipmi/ipmi_opal.c		optional	powernv ipmi
dev/nand/nfc_fsl.c		optional	nand mpc85xx
dev/nand/nfc_rb.c		optional	nand mpc85xx
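For readers skimming the file-list changes: each config(8) entry pairs a source path with "standard" (always built) or "optional" followed by the option/device keywords that must all be configured, and "|" separates alternative keyword sets. A hypothetical entry, purely for illustration (the path and keywords below are made up, not part of this change):

# Hypothetical example of the entry syntax:
# build foo.c when both FOO and the mpc85xx platform are configured,
# or when BAR is configured.
powerpc/example/foo.c		optional	foo mpc85xx | bar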
# Most ofw stuff below is brought in by conf/files for options FDT, but
# we always want it, even on non-FDT platforms.
dev/fdt/simplebus.c		standard
dev/ofw/openfirm.c		standard
dev/ofw/openfirmio.c		standard
dev/ofw/ofw_bus_if.m		standard
dev/ofw/ofw_cpu.c		standard
dev/ofw/ofw_if.m		standard
dev/ofw/ofw_bus_subr.c		standard
dev/ofw/ofw_console.c		optional	aim
dev/ofw/ofw_disk.c		optional	ofwd aim
dev/ofw/ofwbus.c		standard
dev/ofw/ofwpci.c		optional	pci
dev/ofw/ofw_standard.c		optional	aim powerpc
dev/ofw/ofw_subr.c		standard
dev/powermac_nvram/powermac_nvram.c	optional	powermac_nvram powermac
dev/quicc/quicc_bfe_fdt.c	optional	quicc mpc85xx
dev/random/darn.c		optional	powerpc64 random
dev/scc/scc_bfe_macio.c		optional	scc powermac
dev/sdhci/fsl_sdhci.c		optional	mpc85xx sdhci
dev/sec/sec.c			optional	sec mpc85xx
dev/sound/macio/aoa.c		optional	snd_davbus | snd_ai2s powermac
dev/sound/macio/davbus.c	optional	snd_davbus powermac
dev/sound/macio/i2s.c		optional	snd_ai2s powermac
dev/sound/macio/onyx.c		optional	snd_ai2s iicbus powermac
dev/sound/macio/snapper.c	optional	snd_ai2s iicbus powermac
dev/sound/macio/tumbler.c	optional	snd_ai2s iicbus powermac
dev/syscons/scgfbrndr.c		optional	sc
dev/tsec/if_tsec.c		optional	tsec
dev/tsec/if_tsec_fdt.c		optional	tsec
dev/uart/uart_cpu_powerpc.c	optional	uart
dev/usb/controller/ehci_fsl.c	optional	ehci mpc85xx
dev/vt/hw/ofwfb/ofwfb.c		optional	vt aim
kern/kern_clocksource.c		standard
kern/subr_dummy_vdso_tc.c	standard
kern/syscalls.c			optional	ktr
kern/subr_sfbuf.c		standard
libkern/ashldi3.c		optional	powerpc | powerpcspe
libkern/ashrdi3.c		optional	powerpc | powerpcspe
libkern/bcmp.c			standard
libkern/bcopy.c			standard
libkern/cmpdi2.c		optional	powerpc | powerpcspe
libkern/divdi3.c		optional	powerpc | powerpcspe
libkern/ffs.c			standard
libkern/ffsl.c			standard
libkern/ffsll.c			standard
libkern/fls.c			standard
libkern/flsl.c			standard
libkern/flsll.c			standard
libkern/lshrdi3.c		optional	powerpc | powerpcspe
libkern/memcmp.c		standard
libkern/memset.c		standard
libkern/moddi3.c		optional	powerpc | powerpcspe
libkern/qdivrem.c		optional	powerpc | powerpcspe
libkern/ucmpdi2.c		optional	powerpc | powerpcspe
libkern/udivdi3.c		optional	powerpc | powerpcspe
libkern/umoddi3.c		optional	powerpc | powerpcspe
-powerpc/aim/isa3_hashtb.c	optional	aim powerpc64
powerpc/aim/locore.S		optional	aim no-obj
powerpc/aim/aim_machdep.c	optional	aim
powerpc/aim/mmu_oea.c		optional	aim powerpc
powerpc/aim/mmu_oea64.c		optional	aim
powerpc/aim/moea64_if.m		optional	aim
powerpc/aim/moea64_native.c	optional	aim
powerpc/aim/mp_cpudep.c		optional	aim
powerpc/aim/slb.c		optional	aim powerpc64
powerpc/booke/locore.S		optional	booke no-obj
powerpc/booke/booke_machdep.c	optional	booke
powerpc/booke/machdep_e500.c	optional	booke_e500
powerpc/booke/mp_cpudep.c	optional	booke smp
powerpc/booke/platform_bare.c	optional	booke
powerpc/booke/pmap.c		optional	booke
powerpc/booke/spe.c		optional	powerpcspe
powerpc/cpufreq/dfs.c		optional	cpufreq
powerpc/cpufreq/mpc85xx_jog.c	optional	cpufreq mpc85xx
powerpc/cpufreq/pcr.c		optional	cpufreq aim
powerpc/cpufreq/pmcr.c		optional	cpufreq aim powerpc64
powerpc/cpufreq/pmufreq.c	optional	cpufreq aim pmu
powerpc/fpu/fpu_add.c		optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_compare.c	optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_div.c		optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_emu.c		optional	fpu_emu
powerpc/fpu/fpu_explode.c	optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_implode.c	optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_mul.c		optional	fpu_emu | powerpcspe
powerpc/fpu/fpu_sqrt.c		optional	fpu_emu
powerpc/fpu/fpu_subr.c		optional	fpu_emu | powerpcspe
powerpc/mambo/mambocall.S	optional	mambo
powerpc/mambo/mambo.c		optional	mambo
powerpc/mambo/mambo_console.c	optional	mambo
powerpc/mambo/mambo_disk.c	optional	mambo
powerpc/mikrotik/platform_rb.c	optional	mikrotik
powerpc/mikrotik/rb_led.c	optional	mikrotik
powerpc/mpc85xx/atpic.c		optional	mpc85xx isa
powerpc/mpc85xx/ds1553_bus_fdt.c	optional	ds1553
powerpc/mpc85xx/ds1553_core.c	optional	ds1553
powerpc/mpc85xx/fsl_diu.c	optional	mpc85xx diu
powerpc/mpc85xx/fsl_espi.c	optional	mpc85xx spibus
powerpc/mpc85xx/fsl_sata.c	optional	mpc85xx ata
powerpc/mpc85xx/i2c.c		optional	iicbus
powerpc/mpc85xx/isa.c		optional	mpc85xx isa
powerpc/mpc85xx/lbc.c		optional	mpc85xx
powerpc/mpc85xx/mpc85xx.c	optional	mpc85xx
powerpc/mpc85xx/mpc85xx_cache.c	optional	mpc85xx
powerpc/mpc85xx/mpc85xx_gpio.c	optional	mpc85xx gpio
powerpc/mpc85xx/platform_mpc85xx.c	optional	mpc85xx
powerpc/mpc85xx/pci_mpc85xx.c	optional	pci mpc85xx
powerpc/mpc85xx/pci_mpc85xx_pcib.c	optional	pci mpc85xx
powerpc/mpc85xx/qoriq_gpio.c	optional	mpc85xx gpio
powerpc/ofw/ofw_machdep.c	standard
powerpc/ofw/ofw_pcibus.c	optional	pci
powerpc/ofw/ofw_pcib_pci.c	optional	pci
powerpc/ofw/ofw_real.c		optional	aim
powerpc/ofw/ofw_syscons.c	optional	sc aim
powerpc/ofw/ofwcall32.S		optional	aim powerpc
powerpc/ofw/ofwcall64.S		optional	aim powerpc64
powerpc/ofw/openpic_ofw.c	standard
powerpc/ofw/rtas.c		optional	aim
powerpc/ofw/ofw_initrd.c	optional	md_root_mem powerpc64
powerpc/powermac/ata_kauai.c	optional	powermac ata | powermac atamacio
powerpc/powermac/ata_macio.c	optional	powermac ata | powermac atamacio
powerpc/powermac/ata_dbdma.c	optional	powermac ata | powermac atamacio
powerpc/powermac/atibl.c	optional	powermac atibl
powerpc/powermac/cuda.c		optional	powermac cuda
powerpc/powermac/cpcht.c	optional	powermac pci
powerpc/powermac/dbdma.c	optional	powermac pci
powerpc/powermac/fcu.c		optional	powermac fcu
powerpc/powermac/grackle.c	optional	powermac pci
powerpc/powermac/hrowpic.c	optional	powermac pci
powerpc/powermac/kiic.c		optional	powermac kiic
powerpc/powermac/macgpio.c	optional	powermac pci
powerpc/powermac/macio.c	optional	powermac pci
powerpc/powermac/nvbl.c		optional	powermac nvbl
powerpc/powermac/platform_powermac.c	optional	powermac
powerpc/powermac/powermac_thermal.c	optional	powermac
powerpc/powermac/pswitch.c	optional	powermac pswitch
powerpc/powermac/pmu.c		optional	powermac pmu
powerpc/powermac/smu.c		optional	powermac smu
powerpc/powermac/smusat.c	optional	powermac smu
powerpc/powermac/uninorth.c	optional	powermac
powerpc/powermac/uninorthpci.c	optional	powermac pci
powerpc/powermac/vcoregpio.c	optional	powermac
powerpc/powernv/opal.c		optional	powernv
powerpc/powernv/opal_async.c	optional	powernv
powerpc/powernv/opal_console.c	optional	powernv
powerpc/powernv/opal_dev.c	optional	powernv
powerpc/powernv/opal_flash.c	optional	powernv
powerpc/powernv/opal_i2c.c	optional	iicbus fdt powernv
powerpc/powernv/opal_i2cm.c	optional	iicbus fdt powernv
powerpc/powernv/opal_pci.c	optional	powernv pci
powerpc/powernv/opal_sensor.c	optional	powernv
powerpc/powernv/opalcall.S	optional	powernv
powerpc/powernv/platform_powernv.c	optional	powernv
powerpc/powernv/powernv_centaur.c	optional	powernv
powerpc/powernv/powernv_xscom.c	optional	powernv
powerpc/powernv/xive.c		optional	powernv
powerpc/powerpc/altivec.c	optional	powerpc | powerpc64
powerpc/powerpc/autoconf.c	standard
powerpc/powerpc/bus_machdep.c	standard
powerpc/powerpc/busdma_machdep.c	standard
powerpc/powerpc/clock.c		standard
powerpc/powerpc/copyinout.c	standard
powerpc/powerpc/copystr.c	standard
powerpc/powerpc/cpu.c		standard
powerpc/powerpc/cpu_subr64.S	optional	powerpc64
powerpc/powerpc/db_disasm.c	optional	ddb
powerpc/powerpc/db_hwwatch.c	optional	ddb
powerpc/powerpc/db_interface.c	optional	ddb
powerpc/powerpc/db_trace.c	optional	ddb
powerpc/powerpc/dump_machdep.c	standard
powerpc/powerpc/elf32_machdep.c	optional	powerpc | powerpcspe | compat_freebsd32
powerpc/powerpc/elf64_machdep.c	optional	powerpc64
powerpc/powerpc/exec_machdep.c	standard
powerpc/powerpc/fpu.c		standard
powerpc/powerpc/gdb_machdep.c	optional	gdb
powerpc/powerpc/in_cksum.c	optional	inet | inet6
powerpc/powerpc/interrupt.c	standard
powerpc/powerpc/intr_machdep.c	standard
powerpc/powerpc/iommu_if.m	standard
powerpc/powerpc/machdep.c	standard
powerpc/powerpc/mem.c		optional	mem
powerpc/powerpc/mmu_if.m	standard
powerpc/powerpc/mp_machdep.c	optional	smp
powerpc/powerpc/nexus.c		standard
powerpc/powerpc/openpic.c	standard
powerpc/powerpc/pic_if.m	standard
powerpc/powerpc/pmap_dispatch.c	standard
powerpc/powerpc/platform.c	standard
powerpc/powerpc/platform_if.m	standard
powerpc/powerpc/ptrace_machdep.c	standard
powerpc/powerpc/sc_machdep.c	optional	sc
powerpc/powerpc/setjmp.S	standard
powerpc/powerpc/sigcode32.S	optional	powerpc | powerpcspe | compat_freebsd32
powerpc/powerpc/sigcode64.S	optional	powerpc64
powerpc/powerpc/swtch32.S	optional	powerpc | powerpcspe
powerpc/powerpc/swtch64.S	optional	powerpc64
powerpc/powerpc/stack_machdep.c	optional	ddb | stack
powerpc/powerpc/syncicache.c	standard
powerpc/powerpc/sys_machdep.c	standard
powerpc/powerpc/trap.c		standard
powerpc/powerpc/uio_machdep.c	standard
powerpc/powerpc/uma_machdep.c	standard
powerpc/powerpc/vm_machdep.c	standard
powerpc/ps3/ehci_ps3.c		optional	ps3 ehci
powerpc/ps3/ohci_ps3.c		optional	ps3 ohci
powerpc/ps3/if_glc.c		optional	ps3 glc
powerpc/ps3/mmu_ps3.c		optional	ps3
powerpc/ps3/platform_ps3.c	optional	ps3
powerpc/ps3/ps3bus.c		optional	ps3
powerpc/ps3/ps3cdrom.c		optional	ps3 scbus
powerpc/ps3/ps3disk.c		optional	ps3
powerpc/ps3/ps3pic.c		optional	ps3
powerpc/ps3/ps3_syscons.c	optional	ps3 vt
powerpc/ps3/ps3-hvcall.S	optional	ps3
powerpc/pseries/phyp-hvcall.S	optional	pseries powerpc64
powerpc/pseries/mmu_phyp.c	optional	pseries powerpc64
powerpc/pseries/phyp_console.c	optional	pseries powerpc64 uart
powerpc/pseries/phyp_llan.c	optional	llan
powerpc/pseries/phyp_vscsi.c	optional	pseries powerpc64 scbus
powerpc/pseries/platform_chrp.c	optional	pseries
powerpc/pseries/plpar_iommu.c	optional	pseries powerpc64
powerpc/pseries/plpar_pcibus.c	optional	pseries powerpc64 pci
powerpc/pseries/rtas_dev.c	optional	pseries
powerpc/pseries/rtas_pci.c	optional	pseries pci
powerpc/pseries/vdevice.c	optional	pseries powerpc64
powerpc/pseries/xics.c		optional	pseries powerpc64
powerpc/psim/iobus.c		optional	psim
powerpc/psim/ata_iobus.c	optional	ata psim
powerpc/psim/openpic_iobus.c	optional	psim
powerpc/psim/uart_iobus.c	optional	uart psim
Index: head/sys/powerpc/aim/isa3_hashtb.c
===================================================================
--- head/sys/powerpc/aim/isa3_hashtb.c	(revision 345425)
+++ head/sys/powerpc/aim/isa3_hashtb.c	(nonexistent)
@@ -1,632 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
- *
- * Copyright (c) 2001 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas of Allegro Networks, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1.
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (C) 1995, 1996 Wolfgang Solfrank. - * Copyright (C) 1995, 1996 TooLs GmbH. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ - */ -/*- - * Copyright (C) 2001 Benno Rice. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * Native 64-bit page table operations for running without a hypervisor. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "mmu_oea64.h" -#include "mmu_if.h" -#include "moea64_if.h" - -#define PTESYNC() __asm __volatile("ptesync"); -#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); -#define SYNC() __asm __volatile("sync"); -#define EIEIO() __asm __volatile("eieio"); - -#define VSID_HASH_MASK 0x0000007fffffffffULL - -/* POWER9 only permits a 64k partition table size. */ -#define PART_SIZE 0x10000 - -static __inline void -TLBIE(uint64_t vpn) -{ - vpn <<= ADDR_PIDX_SHFT; - - __asm __volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 21)" - :: "r"(vpn), "r"(0) : "memory"); - __asm __volatile("eieio; tlbsync; ptesync" ::: "memory"); -} - -#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) -#define ENABLE_TRANS(msr) mtmsr(msr) - -/* - * PTEG data. - */ -static volatile struct pate *isa3_part_table; -static volatile struct lpte *isa3_hashtb_pteg_table; -static struct rwlock isa3_hashtb_eviction_lock; - -/* - * PTE calls. - */ -static int isa3_hashtb_pte_insert(mmu_t, struct pvo_entry *); -static int64_t isa3_hashtb_pte_synch(mmu_t, struct pvo_entry *); -static int64_t isa3_hashtb_pte_clear(mmu_t, struct pvo_entry *, uint64_t); -static int64_t isa3_hashtb_pte_replace(mmu_t, struct pvo_entry *, int); -static int64_t isa3_hashtb_pte_unset(mmu_t mmu, struct pvo_entry *); - -/* - * Utility routines. 
- */ -static void isa3_hashtb_bootstrap(mmu_t mmup, - vm_offset_t kernelstart, vm_offset_t kernelend); -static void isa3_hashtb_cpu_bootstrap(mmu_t, int ap); -static void tlbia(void); - -static mmu_method_t isa3_hashtb_methods[] = { - /* Internal interfaces */ - MMUMETHOD(mmu_bootstrap, isa3_hashtb_bootstrap), - MMUMETHOD(mmu_cpu_bootstrap, isa3_hashtb_cpu_bootstrap), - - MMUMETHOD(moea64_pte_synch, isa3_hashtb_pte_synch), - MMUMETHOD(moea64_pte_clear, isa3_hashtb_pte_clear), - MMUMETHOD(moea64_pte_unset, isa3_hashtb_pte_unset), - MMUMETHOD(moea64_pte_replace, isa3_hashtb_pte_replace), - MMUMETHOD(moea64_pte_insert, isa3_hashtb_pte_insert), - - { 0, 0 } -}; - -MMU_DEF_INHERIT(isa3_mmu_native, MMU_TYPE_P9H, isa3_hashtb_methods, - 0, oea64_mmu); - -static int64_t -isa3_hashtb_pte_synch(mmu_t mmu, struct pvo_entry *pvo) -{ - volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot; - struct lpte properpt; - uint64_t ptelo; - - PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); - - moea64_pte_from_pvo(pvo, &properpt); - - rw_rlock(&isa3_hashtb_eviction_lock); - if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != - (properpt.pte_hi & LPTE_AVPN_MASK)) { - /* Evicted */ - rw_runlock(&isa3_hashtb_eviction_lock); - return (-1); - } - - PTESYNC(); - ptelo = be64toh(pt->pte_lo); - - rw_runlock(&isa3_hashtb_eviction_lock); - - return (ptelo & (LPTE_REF | LPTE_CHG)); -} - -static int64_t -isa3_hashtb_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit) -{ - volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot; - struct lpte properpt; - uint64_t ptelo; - - PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); - - moea64_pte_from_pvo(pvo, &properpt); - - rw_rlock(&isa3_hashtb_eviction_lock); - if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != - (properpt.pte_hi & LPTE_AVPN_MASK)) { - /* Evicted */ - rw_runlock(&isa3_hashtb_eviction_lock); - return (-1); - } - - if (ptebit == LPTE_REF) { - /* See "Resetting the Reference Bit" in arch manual */ - PTESYNC(); - /* 2-step here safe: precision is not guaranteed */ - ptelo = be64toh(pt->pte_lo); - - /* One-byte store to avoid touching the C bit */ - ((volatile uint8_t *)(&pt->pte_lo))[6] = -#if BYTE_ORDER == BIG_ENDIAN - ((uint8_t *)(&properpt.pte_lo))[6]; -#else - ((uint8_t *)(&properpt.pte_lo))[1]; -#endif - rw_runlock(&isa3_hashtb_eviction_lock); - - critical_enter(); - TLBIE(pvo->pvo_vpn); - critical_exit(); - } else { - rw_runlock(&isa3_hashtb_eviction_lock); - ptelo = isa3_hashtb_pte_unset(mmu, pvo); - isa3_hashtb_pte_insert(mmu, pvo); - } - - return (ptelo & (LPTE_REF | LPTE_CHG)); -} - -static int64_t -isa3_hashtb_pte_unset(mmu_t mmu, struct pvo_entry *pvo) -{ - volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot; - struct lpte properpt; - uint64_t ptelo; - - moea64_pte_from_pvo(pvo, &properpt); - - rw_rlock(&isa3_hashtb_eviction_lock); - if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) != - (properpt.pte_hi & LPTE_AVPN_MASK)) { - /* Evicted */ - moea64_pte_overflow--; - rw_runlock(&isa3_hashtb_eviction_lock); - return (-1); - } - - /* - * Invalidate the pte, briefly locking it to collect RC bits. No - * atomics needed since this is protected against eviction by the lock. 
- */ - isync(); - critical_enter(); - pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED); - PTESYNC(); - TLBIE(pvo->pvo_vpn); - ptelo = be64toh(pt->pte_lo); - *((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */ - critical_exit(); - rw_runlock(&isa3_hashtb_eviction_lock); - - /* Keep statistics */ - moea64_pte_valid--; - - return (ptelo & (LPTE_CHG | LPTE_REF)); -} - -static int64_t -isa3_hashtb_pte_replace(mmu_t mmu, struct pvo_entry *pvo, int flags) -{ - volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot; - struct lpte properpt; - int64_t ptelo; - - if (flags == 0) { - /* Just some software bits changing. */ - moea64_pte_from_pvo(pvo, &properpt); - - rw_rlock(&isa3_hashtb_eviction_lock); - if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != - (properpt.pte_hi & LPTE_AVPN_MASK)) { - rw_runlock(&isa3_hashtb_eviction_lock); - return (-1); - } - pt->pte_hi = htobe64(properpt.pte_hi); - ptelo = be64toh(pt->pte_lo); - rw_runlock(&isa3_hashtb_eviction_lock); - } else { - /* Otherwise, need reinsertion and deletion */ - ptelo = isa3_hashtb_pte_unset(mmu, pvo); - isa3_hashtb_pte_insert(mmu, pvo); - } - - return (ptelo); -} - -static void -isa3_hashtb_cpu_bootstrap(mmu_t mmup, int ap) -{ - int i = 0; - struct slb *slb = PCPU_GET(aim.slb); - register_t seg0; - - /* - * Initialize segment registers and MMU - */ - - mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); - - switch (mfpvr() >> 16) { - case IBMPOWER9: - mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX); - break; - } - - /* - * Install kernel SLB entries - */ - - __asm __volatile ("slbia"); - __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : - "r"(0)); - - for (i = 0; i < n_slbs; i++) { - if (!(slb[i].slbe & SLBE_VALID)) - continue; - - __asm __volatile ("slbmte %0, %1" :: - "r"(slb[i].slbv), "r"(slb[i].slbe)); - } - - /* - * Install page table - */ - - mtspr(SPR_PTCR, - ((uintptr_t)isa3_part_table & ~DMAP_BASE_ADDRESS) | - flsl((PART_SIZE >> 12) - 1)); - tlbia(); -} - -static void -isa3_hashtb_bootstrap(mmu_t mmup, vm_offset_t kernelstart, - vm_offset_t kernelend) -{ - vm_size_t size; - vm_offset_t off; - vm_paddr_t pa; - register_t msr; - - moea64_early_bootstrap(mmup, kernelstart, kernelend); - - /* - * Allocate PTEG table. - */ - - size = moea64_pteg_count * sizeof(struct lpteg); - CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %lu bytes", - moea64_pteg_count, size); - rw_init(&isa3_hashtb_eviction_lock, "pte eviction"); - - /* - * We now need to allocate memory. This memory, to be allocated, - * has to reside in a page table. The page table we are about to - * allocate. We don't have BAT. So drop to data real mode for a minute - * as a measure of last resort. We do this a couple times. - */ - - isa3_part_table = - (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE); - if (hw_direct_map) - isa3_part_table = (struct pate *)PHYS_TO_DMAP( - (vm_offset_t)isa3_part_table); - /* - * PTEG table must be aligned on a 256k boundary, but can be placed - * anywhere with that alignment on POWER ISA 3+ systems. 
- */ - isa3_hashtb_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, - MAX(256*1024, size)); - if (hw_direct_map) - isa3_hashtb_pteg_table = - (struct lpte *)PHYS_TO_DMAP((vm_offset_t)isa3_hashtb_pteg_table); - DISABLE_TRANS(msr); - bzero(__DEVOLATILE(void *, isa3_part_table), PART_SIZE); - isa3_part_table[0].pagetab = - ((uintptr_t)isa3_hashtb_pteg_table & ~DMAP_BASE_ADDRESS) | - (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)); - bzero(__DEVOLATILE(void *, isa3_hashtb_pteg_table), moea64_pteg_count * - sizeof(struct lpteg)); - ENABLE_TRANS(msr); - - CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", isa3_hashtb_pteg_table); - - moea64_mid_bootstrap(mmup, kernelstart, kernelend); - - /* - * Add a mapping for the page table itself if there is no direct map. - */ - if (!hw_direct_map) { - size = moea64_pteg_count * sizeof(struct lpteg); - off = (vm_offset_t)(isa3_hashtb_pteg_table); - DISABLE_TRANS(msr); - for (pa = off; pa < off + size; pa += PAGE_SIZE) - pmap_kenter(pa, pa); - ENABLE_TRANS(msr); - } - - /* Bring up virtual memory */ - moea64_late_bootstrap(mmup, kernelstart, kernelend); -} - -static void -tlbia(void) -{ - vm_offset_t i; - - i = 0xc00; /* IS = 11 */ - - TLBSYNC(); - - for (; i < 0x200000; i += 0x00001000) { - __asm __volatile("tlbiel %0" :: "r"(i)); - } - - EIEIO(); - TLBSYNC(); -} - -static int -atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi) -{ - int ret; - uint32_t oldhihalf; - - /* - * Note: in principle, if just the locked bit were set here, we - * could avoid needing the eviction lock. However, eviction occurs - * so rarely that it isn't worth bothering about in practice. - */ - - __asm __volatile ( - "1:\tlwarx %1, 0, %3\n\t" /* load old value */ - "and. %0,%1,%4\n\t" /* check if any bits set */ - "bne 2f\n\t" /* exit if any set */ - "stwcx. %5, 0, %3\n\t" /* attempt to store */ - "bne- 1b\n\t" /* spin if failed */ - "li %0, 1\n\t" /* success - retval = 1 */ - "b 3f\n\t" /* we've succeeded */ - "2:\n\t" - "stwcx. %1, 0, %3\n\t" /* clear reservation (74xx) */ - "li %0, 0\n\t" /* failure - retval = 0 */ - "3:\n\t" - : "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi) - : "r" ((volatile char *)&pte->pte_hi + 4), - "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED), - "m" (pte->pte_hi) - : "cr0", "cr1", "cr2", "memory"); - - *oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf; - - return (ret); -} - -static uintptr_t -isa3_hashtb_insert_to_pteg(struct lpte *pvo_pt, uintptr_t slotbase, - uint64_t mask) -{ - volatile struct lpte *pt; - uint64_t oldptehi, va; - uintptr_t k; - int i, j; - - /* Start at a random slot */ - i = mftb() % 8; - for (j = 0; j < 8; j++) { - k = slotbase + (i + j) % 8; - pt = &isa3_hashtb_pteg_table[k]; - /* Invalidate and seize lock only if no bits in mask set */ - if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */ - break; - } - - if (j == 8) - return (-1); - - if (oldptehi & LPTE_VALID) { - KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry")); - /* - * Need to invalidate old entry completely: see - * "Modifying a Page Table Entry". Need to reconstruct - * the virtual address for the outgoing entry to do that. 
- */ - if (oldptehi & LPTE_BIG) - va = oldptehi >> moea64_large_page_shift; - else - va = oldptehi >> ADDR_PIDX_SHFT; - if (oldptehi & LPTE_HID) - va = (((k >> 3) ^ moea64_pteg_mask) ^ va) & - VSID_HASH_MASK; - else - va = ((k >> 3) ^ va) & VSID_HASH_MASK; - va |= (oldptehi & LPTE_AVPN_MASK) << - (ADDR_API_SHFT64 - ADDR_PIDX_SHFT); - PTESYNC(); - TLBIE(va); - moea64_pte_valid--; - moea64_pte_overflow++; - } - - /* - * Update the PTE as per "Adding a Page Table Entry". Lock is released - * by setting the high doubleworld. - */ - pt->pte_lo = htobe64(pvo_pt->pte_lo); - EIEIO(); - pt->pte_hi = htobe64(pvo_pt->pte_hi); - PTESYNC(); - - /* Keep statistics */ - moea64_pte_valid++; - - return (k); -} - -static int -isa3_hashtb_pte_insert(mmu_t mmu, struct pvo_entry *pvo) -{ - struct lpte insertpt; - uintptr_t slot; - - /* Initialize PTE */ - moea64_pte_from_pvo(pvo, &insertpt); - - /* Make sure further insertion is locked out during evictions */ - rw_rlock(&isa3_hashtb_eviction_lock); - - /* - * First try primary hash. - */ - pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */ - slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot, - LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); - if (slot != -1) { - rw_runlock(&isa3_hashtb_eviction_lock); - pvo->pvo_pte.slot = slot; - return (0); - } - - /* - * Now try secondary hash. - */ - pvo->pvo_vaddr ^= PVO_HID; - insertpt.pte_hi ^= LPTE_HID; - pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); - slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot, - LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); - if (slot != -1) { - rw_runlock(&isa3_hashtb_eviction_lock); - pvo->pvo_pte.slot = slot; - return (0); - } - - /* - * Out of luck. Find a PTE to sacrifice. - */ - - /* Lock out all insertions for a bit */ - if (!rw_try_upgrade(&isa3_hashtb_eviction_lock)) { - rw_runlock(&isa3_hashtb_eviction_lock); - rw_wlock(&isa3_hashtb_eviction_lock); - } - - slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot, - LPTE_WIRED | LPTE_LOCKED); - if (slot != -1) { - rw_wunlock(&isa3_hashtb_eviction_lock); - pvo->pvo_pte.slot = slot; - return (0); - } - - /* Try other hash table. Now we're getting desperate... */ - pvo->pvo_vaddr ^= PVO_HID; - insertpt.pte_hi ^= LPTE_HID; - pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); - slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot, - LPTE_WIRED | LPTE_LOCKED); - if (slot != -1) { - rw_wunlock(&isa3_hashtb_eviction_lock); - pvo->pvo_pte.slot = slot; - return (0); - } - - /* No freeable slots in either PTEG? We're hosed. */ - rw_wunlock(&isa3_hashtb_eviction_lock); - panic("moea64_pte_insert: overflow"); - return (-1); -} - Property changes on: head/sys/powerpc/aim/isa3_hashtb.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/aim/aim_machdep.c =================================================================== --- head/sys/powerpc/aim/aim_machdep.c (revision 345425) +++ head/sys/powerpc/aim/aim_machdep.c (revision 345426) @@ -1,696 +1,694 @@ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __powerpc64__ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __powerpc64__ #include "mmu_oea64.h" #endif #ifndef __powerpc64__ struct bat battable[16]; #endif #ifndef __powerpc64__ /* Bits for running on 64-bit systems in 32-bit mode. 
*/ extern void *testppc64, *testppc64size; extern void *restorebridge, *restorebridgesize; extern void *rfid_patch, *rfi_patch1, *rfi_patch2; extern void *trapcode64; extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; #endif extern void *rstcode, *rstcodeend; extern void *trapcode, *trapcodeend; extern void *hypertrapcode, *hypertrapcodeend; extern void *generictrap, *generictrap64; extern void *alitrap, *aliend; extern void *dsitrap, *dsiend; extern void *decrint, *decrsize; extern void *extint, *extsize; extern void *dblow, *dbend; extern void *imisstrap, *imisssize; extern void *dlmisstrap, *dlmisssize; extern void *dsmisstrap, *dsmisssize; extern void *ap_pcpu; extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie); void aim_cpu_init(vm_offset_t toc); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie) { register_t scratch; /* * If running from an FDT, make sure we are in real mode to avoid * tromping on firmware page tables. Everything in the kernel assumes * 1:1 mappings out of firmware, so this won't break anything not * already broken. This doesn't work if there is live OF, since OF * may internally use non-1:1 mappings. */ if (ofentry == 0) mtmsr(mfmsr() & ~(PSL_IR | PSL_DR)); #ifdef __powerpc64__ /* * If in real mode, relocate to high memory so that the kernel * can execute from the direct map. */ if (!(mfmsr() & PSL_DR) && (vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie, DMAP_BASE_ADDRESS, mfmsr()); #endif /* Various very early CPU fix ups */ switch (mfpvr() >> 16) { /* * PowerPC 970 CPUs have a misfeature requested by Apple that * makes them pretend they have a 32-byte cacheline. Turn this * off before we measure the cacheline size. */ case IBM970: case IBM970FX: case IBM970MP: case IBM970GX: scratch = mfspr(SPR_HID5); scratch &= ~HID5_970_DCBZ_SIZE_HI; mtspr(SPR_HID5, scratch); break; #ifdef __powerpc64__ case IBMPOWER7: case IBMPOWER7PLUS: case IBMPOWER8: case IBMPOWER8E: case IBMPOWER9: /* XXX: get from ibm,slb-size in device tree */ n_slbs = 32; break; #endif } } void aim_cpu_init(vm_offset_t toc) { size_t trap_offset, trapsize; vm_offset_t trap; register_t msr; uint8_t *cache_check; int cacheline_warn; #ifndef __powerpc64__ register_t scratch; int ppc64; #endif trap_offset = 0; cacheline_warn = 0; /* General setup for AIM CPUs */ psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI; #ifdef __powerpc64__ psl_kernset |= PSL_SF; if (mfmsr() & PSL_HV) psl_kernset |= PSL_HV; #endif psl_userset = psl_kernset | PSL_PR; #ifdef __powerpc64__ psl_userset32 = psl_userset & ~PSL_SF; #endif /* Bits that users aren't allowed to change */ psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1); /* * Mask bits from the SRR1 that aren't really the MSR: * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64) */ psl_userstatic &= ~0x783f0000UL; /* * Initialize the interrupt tables and figure out our cache line * size and whether or not we need the 64-bit bridge code. */ /* * Disable translation in case the vector area hasn't been * mapped (G5). Note that no OFW calls can be made until * translation is re-enabled. */ msr = mfmsr(); mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); /* * Measure the cacheline size using dcbz * * Use EXC_PGM as a playground. 
We are about to overwrite it * anyway, we know it exists, and we know it is cache-aligned. */ cache_check = (void *)EXC_PGM; for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) cache_check[cacheline_size] = 0xff; __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); /* Find the first byte dcbz did not zero to get the cache line size */ for (cacheline_size = 0; cacheline_size < 0x100 && cache_check[cacheline_size] == 0; cacheline_size++); /* Work around psim bug */ if (cacheline_size == 0) { cacheline_warn = 1; cacheline_size = 32; } #ifndef __powerpc64__ /* * Figure out whether we need to use the 64 bit PMAP. This works by * executing an instruction that is only legal on 64-bit PPC (mtmsrd), * and setting ppc64 = 0 if that causes a trap. */ ppc64 = 1; bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); __syncicache((void *)EXC_PGM, (size_t)&testppc64size); __asm __volatile("\ mfmsr %0; \ mtsprg2 %1; \ \ mtmsrd %0; \ mfsprg2 %1;" : "=r"(scratch), "=r"(ppc64)); if (ppc64) cpu_features |= PPC_FEATURE_64; /* * Now copy restorebridge into all the handlers, if necessary, * and set up the trap tables. */ if (cpu_features & PPC_FEATURE_64) { /* Patch the two instances of rfi -> rfid */ bcopy(&rfid_patch,&rfi_patch1,4); #ifdef KDB /* rfi_patch2 is at the end of dbleave */ bcopy(&rfid_patch,&rfi_patch2,4); #endif } #else /* powerpc64 */ cpu_features |= PPC_FEATURE_64; #endif trapsize = (size_t)&trapcodeend - (size_t)&trapcode; /* * Copy generic handler into every possible trap. Special cases will get * different ones in a minute. */ for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) bcopy(&trapcode, (void *)trap, trapsize); #ifndef __powerpc64__ if (cpu_features & PPC_FEATURE_64) { /* * Copy a code snippet to restore 32-bit bridge mode * to the top of every non-generic trap handler */ trap_offset += (size_t)&restorebridgesize; bcopy(&restorebridge, (void *)EXC_RST, trap_offset); bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); } #else trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode; bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize); #endif bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - (size_t)&rstcode); #ifdef KDB bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - (size_t)&dblow); #endif bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - (size_t)&alitrap); bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - (size_t)&dsitrap); #ifdef __powerpc64__ /* Set TOC base so that the interrupt code can get at it */ *((void **)TRAP_GENTRAP) = &generictrap; *((register_t *)TRAP_TOCBASE) = toc; #else /* Set branch address for trap code */ if (cpu_features & PPC_FEATURE_64) *((void **)TRAP_GENTRAP) = &generictrap64; else *((void **)TRAP_GENTRAP) = &generictrap; *((void 
**)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; /* G2-specific TLB miss helper handlers */ bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); #endif __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); /* * Restore MSR */ mtmsr(msr); /* Warn if cachline size was not determined */ if (cacheline_warn == 1) { printf("WARNING: cacheline size undetermined, setting to 32\n"); } /* * Initialise virtual memory. Use BUS_PROBE_GENERIC priority * in case the platform module had a better idea of what we * should do. */ - if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) - pmap_mmu_install(MMU_TYPE_P9H, BUS_PROBE_GENERIC); - else if (cpu_features & PPC_FEATURE_64) + if (cpu_features & PPC_FEATURE_64) pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); else pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); } /* * Shutdown the CPU as much as possible. */ void cpu_halt(void) { OF_exit(); } int ptrace_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 |= PSL_SE; return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 &= ~PSL_SE; return (0); } void kdb_cpu_clear_singlestep(void) { kdb_frame->srr1 &= ~PSL_SE; } void kdb_cpu_set_singlestep(void) { kdb_frame->srr1 |= PSL_SE; } /* * Initialise a struct pcpu. */ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) { #ifdef __powerpc64__ /* Copy the SLB contents from the current CPU */ memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb)); #endif } #ifndef __powerpc64__ uint64_t va_to_vsid(pmap_t pm, vm_offset_t va) { return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); } #endif /* * These functions need to provide addresses that both (a) work in real mode * (or whatever mode/circumstances the kernel is in in early boot (now)) and * (b) can still, in principle, work once the kernel is going. Because these * rely on existing mappings/real mode, unmap is a no-op. */ vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size) { KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!")); /* * If we have the MMU up in early boot, assume it is 1:1. Otherwise, * try to get the address in a memory region compatible with the * direct map for efficiency later. */ if (mfmsr() & PSL_DR) return (pa); else return (DMAP_BASE_ADDRESS + pa); } void pmap_early_io_unmap(vm_offset_t va, vm_size_t size) { KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!")); } /* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */ void flush_disable_caches(void) { register_t msr; register_t msscr0; register_t cache_reg; volatile uint32_t *memp; uint32_t temp; int i; int x; msr = mfmsr(); powerpc_sync(); mtmsr(msr & ~(PSL_EE | PSL_DR)); msscr0 = mfspr(SPR_MSSCR0); msscr0 &= ~MSSCR0_L2PFE; mtspr(SPR_MSSCR0, msscr0); powerpc_sync(); isync(); __asm__ __volatile__("dssall; sync"); powerpc_sync(); isync(); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); /* Lock the L1 Data cache. */ mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF); powerpc_sync(); isync(); mtspr(SPR_LDSTCR, 0); /* * Perform this in two stages: Flush the cache starting in RAM, then do it * from ROM. 
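The comment above describes the strategy of flush_disable_caches(); the loop that follows is stage one of it. As a distilled sketch of the idiom (assuming, as the surrounding code does, 32-byte cache lines on the MPC7450-class parts this routine targets; the helper name is made up):

/*
 * Sketch: walk a window much larger than the cache at line stride,
 * loading each line and then flushing it with dcbf so no dirty data
 * remains. Mirrors the RAM pass of the loop below; illustrative only.
 */
static void
flush_window_sketch(volatile uint32_t *base, int nlines)
{
	volatile uint32_t *p = base;
	uint32_t tmp;
	int i;

	for (i = 0; i < nlines; i++) {
		tmp = *p;		/* pull the line into the data cache */
		__asm__ __volatile__("dcbf 0,%0" :: "r"(p) : "memory");
		p += 32 / sizeof(*p);	/* advance one 32-byte line */
	}
	(void)tmp;			/* loads matter only for their side effect */
}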
*/ memp = (volatile uint32_t *)0x00000000; for (i = 0; i < 128 * 1024; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } memp = (volatile uint32_t *)0xfff00000; x = 0xfe; for (; x != 0xff;) { mtspr(SPR_LDSTCR, x); for (i = 0; i < 128; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } x = ((x << 1) | 1) & 0xff; } mtspr(SPR_LDSTCR, 0); cache_reg = mfspr(SPR_L2CR); if (cache_reg & L2CR_L2E) { cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); while (mfspr(SPR_L2CR) & L2CR_L2HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L2CR_L2E; mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2I); powerpc_sync(); while (mfspr(SPR_L2CR) & L2CR_L2I) ; /* Busy wait for L2 cache invalidate */ powerpc_sync(); } cache_reg = mfspr(SPR_L3CR); if (cache_reg & L3CR_L3E) { cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); while (mfspr(SPR_L3CR) & L3CR_L3HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L3CR_L3E; mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3I); powerpc_sync(); while (mfspr(SPR_L3CR) & L3CR_L3I) ; /* Busy wait for L3 cache invalidate */ powerpc_sync(); } mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); powerpc_sync(); isync(); mtmsr(msr); } void cpu_sleep() { static u_quad_t timebase = 0; static register_t sprgs[4]; static register_t srrs[2]; jmp_buf resetjb; struct thread *fputd; struct thread *vectd; register_t hid0; register_t msr; register_t saved_msr; ap_pcpu = pcpup; PCPU_SET(restore, &resetjb); saved_msr = mfmsr(); fputd = PCPU_GET(fputhread); vectd = PCPU_GET(vecthread); if (fputd != NULL) save_fpu(fputd); if (vectd != NULL) save_vec(vectd); if (setjmp(resetjb) == 0) { sprgs[0] = mfspr(SPR_SPRG0); sprgs[1] = mfspr(SPR_SPRG1); sprgs[2] = mfspr(SPR_SPRG2); sprgs[3] = mfspr(SPR_SPRG3); srrs[0] = mfspr(SPR_SRR0); srrs[1] = mfspr(SPR_SRR1); timebase = mftb(); powerpc_sync(); flush_disable_caches(); hid0 = mfspr(SPR_HID0); hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; powerpc_sync(); isync(); msr = mfmsr() | PSL_POW; mtspr(SPR_HID0, hid0); powerpc_sync(); while (1) mtmsr(msr); } platform_smp_timebase_sync(timebase, 0); PCPU_SET(curthread, curthread); PCPU_SET(curpcb, curthread->td_pcb); pmap_activate(curthread); powerpc_sync(); mtspr(SPR_SPRG0, sprgs[0]); mtspr(SPR_SPRG1, sprgs[1]); mtspr(SPR_SPRG2, sprgs[2]); mtspr(SPR_SPRG3, sprgs[3]); mtspr(SPR_SRR0, srrs[0]); mtspr(SPR_SRR1, srrs[1]); mtmsr(saved_msr); if (fputd == curthread) enable_fpu(curthread); if (vectd == curthread) enable_vec(curthread); powerpc_sync(); } Index: head/sys/powerpc/aim/moea64_native.c =================================================================== --- head/sys/powerpc/aim/moea64_native.c (revision 345425) +++ head/sys/powerpc/aim/moea64_native.c (revision 345426) @@ -1,707 +1,747 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD * * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Matt Thomas of Allegro Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ */ /*- * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000

-static int moea64_crop_tlbie;
+static bool moea64_crop_tlbie;
+static bool moea64_need_lock;

static __inline void
TLBIE(uint64_t vpn)
{
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif
	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
-	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
-	isync(); /* Flush instruction queue once lock acquired */
+	if (moea64_need_lock) {
+		while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
+		isync(); /* Flush instruction queue once lock acquired */
+	}

	if (moea64_crop_tlbie)
		vpn &= ~(0xffffULL << 48);

#ifdef __powerpc64__
-	__asm __volatile("li 0, 0; tlbie %0" :: "r"(vpn) : "0","memory");
+	/*
+	 * Explicitly clobber r0. The tlbie instruction has two forms: an old
+	 * one used by PowerISA 2.03 and prior, and a newer one used by
+	 * PowerISA 2.06 (maybe 2.05?) and later. We need to support both,
+	 * and it just so happens that since we use 4k pages we can simply
+	 * zero out r0, and clobber it, and the assembler will interpret the
+	 * single-operand form of tlbie as having RB set, and everything else
+	 * as 0. The RS operand in the newer form is in the same position as
+	 * the L(page size) bit of the old form, so as long as RS is 0, we're
+	 * good on both sides.
+	 */
+	__asm __volatile("li 0, 0 \n tlbie %0" :: "r"(vpn) : "r0", "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	: "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
-	tlbie_lock = 0;
+	if (moea64_need_lock)
+		tlbie_lock = 0;
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
*/ static volatile struct lpte *moea64_pteg_table; static struct rwlock moea64_eviction_lock; +static volatile struct pate *moea64_part_table; + /* * PTE calls. */ static int moea64_pte_insert_native(mmu_t, struct pvo_entry *); static int64_t moea64_pte_synch_native(mmu_t, struct pvo_entry *); static int64_t moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t); static int64_t moea64_pte_replace_native(mmu_t, struct pvo_entry *, int); static int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *); /* * Utility routines. */ static void moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend); static void moea64_cpu_bootstrap_native(mmu_t, int ap); static void tlbia(void); static mmu_method_t moea64_native_methods[] = { /* Internal interfaces */ MMUMETHOD(mmu_bootstrap, moea64_bootstrap_native), MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap_native), MMUMETHOD(moea64_pte_synch, moea64_pte_synch_native), MMUMETHOD(moea64_pte_clear, moea64_pte_clear_native), MMUMETHOD(moea64_pte_unset, moea64_pte_unset_native), MMUMETHOD(moea64_pte_replace, moea64_pte_replace_native), MMUMETHOD(moea64_pte_insert, moea64_pte_insert_native), { 0, 0 } }; MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, 0, oea64_mmu); static int64_t moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo) { volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; struct lpte properpt; uint64_t ptelo; PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); moea64_pte_from_pvo(pvo, &properpt); rw_rlock(&moea64_eviction_lock); if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != (properpt.pte_hi & LPTE_AVPN_MASK)) { /* Evicted */ rw_runlock(&moea64_eviction_lock); return (-1); } PTESYNC(); ptelo = be64toh(pt->pte_lo); rw_runlock(&moea64_eviction_lock); return (ptelo & (LPTE_REF | LPTE_CHG)); } static int64_t moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit) { volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; struct lpte properpt; uint64_t ptelo; PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); moea64_pte_from_pvo(pvo, &properpt); rw_rlock(&moea64_eviction_lock); if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != (properpt.pte_hi & LPTE_AVPN_MASK)) { /* Evicted */ rw_runlock(&moea64_eviction_lock); return (-1); } if (ptebit == LPTE_REF) { /* See "Resetting the Reference Bit" in arch manual */ PTESYNC(); /* 2-step here safe: precision is not guaranteed */ ptelo = be64toh(pt->pte_lo); /* One-byte store to avoid touching the C bit */ ((volatile uint8_t *)(&pt->pte_lo))[6] = #if BYTE_ORDER == BIG_ENDIAN ((uint8_t *)(&properpt.pte_lo))[6]; #else ((uint8_t *)(&properpt.pte_lo))[1]; #endif rw_runlock(&moea64_eviction_lock); critical_enter(); TLBIE(pvo->pvo_vpn); critical_exit(); } else { rw_runlock(&moea64_eviction_lock); ptelo = moea64_pte_unset_native(mmu, pvo); moea64_pte_insert_native(mmu, pvo); } return (ptelo & (LPTE_REF | LPTE_CHG)); } static int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo) { volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; struct lpte properpt; uint64_t ptelo; moea64_pte_from_pvo(pvo, &properpt); rw_rlock(&moea64_eviction_lock); if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) != (properpt.pte_hi & LPTE_AVPN_MASK)) { /* Evicted */ moea64_pte_overflow--; rw_runlock(&moea64_eviction_lock); return (-1); } /* * Invalidate the pte, briefly locking it to collect RC bits. No * atomics needed since this is protected against eviction by the lock. 
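A note on the one-byte store in moea64_pte_clear_native() above: the PTE image is kept big-endian in memory, so byte 6 of pte_lo carries bits 15..8 (which include LPTE_REF, 0x100) while byte 7 carries bits 7..0 (which include LPTE_CHG, 0x80). Overwriting byte 6 alone can therefore clear R without ever racing a hardware update of the C bit. A self-contained sketch of that layout (assumes the standard LPTE_REF/LPTE_CHG bit values; illustrative only):

#include <assert.h>
#include <stdint.h>

#define LPTE_REF 0x100	/* reference (R) bit: bits 15..8 -> BE byte 6 */
#define LPTE_CHG 0x080	/* change (C) bit: bits 7..0 -> BE byte 7 */

int
main(void)
{
	uint64_t pte_lo = LPTE_REF | LPTE_CHG;
	uint8_t be[8];
	int i;

	for (i = 0; i < 8; i++)		/* serialize big-endian */
		be[i] = pte_lo >> (56 - 8 * i);

	assert(be[6] == (LPTE_REF >> 8));	/* R lives in byte 6 */
	assert(be[7] == LPTE_CHG);		/* C lives in byte 7 */
	be[6] = 0;			/* clearing R cannot touch byte 7 (C) */
	assert(be[7] == LPTE_CHG);
	return (0);
}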
static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

-	__asm __volatile ("ptesync; mtsdr1 %0; isync"
-	    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
-		 | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
+	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
+		mtspr(SPR_PTCR,
+		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
+		     flsl((PART_SIZE >> 12) - 1));
+	else
+		__asm __volatile ("ptesync; mtsdr1 %0; isync"
+		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
+			 | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}
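/*
 * A worked example of the SDR1 encoding in the legacy branch above, assuming
 * a hypothetical power-of-two hash table of 4096 PTEGs. SDR1's HTABSIZE
 * field encodes log2(PTEG count) - 11 (2^11 PTEGs, i.e. 256 KB, is the
 * minimum table), and flsl(moea64_pteg_mask >> 11) yields exactly that.
 * demo_flsl() is a local stand-in for the kernel's flsl(); the PTCR size
 * encoding via flsl((PART_SIZE >> 12) - 1) follows the same pattern.
 */
#include <stdio.h>

/* Index of the highest set bit, 1-based; 0 for x == 0. */
static int
demo_flsl(unsigned long x)
{
	int bit;

	for (bit = 0; x != 0; bit++)
		x >>= 1;
	return (bit);
}

int
main(void)
{
	unsigned long pteg_count = 1UL << 12;		/* 4096 PTEGs */
	unsigned long pteg_mask = pteg_count - 1;	/* 0xfff */
	int htabsize = demo_flsl(pteg_mask >> 11);	/* == 1 */

	printf("HTABSIZE %d encodes %lu PTEGs\n", htabsize,
	    1UL << (htabsize + 11));
	return (0);
}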
static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	switch (mfpvr() >> 16) {
+	case IBMPOWER9:
+		moea64_need_lock = false;
+		break;
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBM970:
	case IBM970FX:
	case IBM970GX:
	case IBM970MP:
-		moea64_crop_tlbie = true;
+		moea64_crop_tlbie = true;
+		/* FALLTHROUGH: these CPUs also need the tlbie lock */
+	default:
+		moea64_need_lock = true;
	}

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory, but that memory has to be covered
	 * by a page table -- the very page table we are about to allocate.
	 * With no BAT registers to fall back on, drop to data real mode for
	 * a moment as a measure of last resort; we do this a couple of times
	 * below.
	 */
	/*
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment on POWER ISA 3+ systems. On earlier
	 * systems, offset addition is done by the CPU with bitwise OR rather
	 * than addition, so the table must also be aligned on a boundary of
	 * its own size. Pick the larger of the two, which works on all
	 * systems.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
	    MAX(256*1024, size));
	if (hw_direct_map)
		moea64_pteg_table =
		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
+	/* Allocate partition table (ISA 3.0). */
+	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
+		moea64_part_table =
+		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
+		if (hw_direct_map)
+			moea64_part_table =
+			    (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
+	}
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
+	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
+		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
+		moea64_part_table[0].pagetab =
+		    (DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
+		    (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
+	}
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

-	for (; i < 0x200000; i += 0x00001000) {
+	for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}
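/*
 * Quick arithmetic behind the loop-bound change in tlbia() above: each
 * tlbiel step advances the effective address by 0x1000, so the bound sets
 * how many TLB congruence classes get invalidated. Raising it from 0x200000
 * to 0x400000 doubles the coverage from 512 to 1024 classes, presumably to
 * cover newer CPUs with larger TLBs. Illustrative check:
 */
#include <stdio.h>

int
main(void)
{
	printf("old bound: %d classes\n", 0x200000 / 0x1000);	/* 512 */
	printf("new bound: %d classes\n", 0x400000 / 0x1000);	/* 1024 */
	return (0);
}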
static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}
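/*
 * A sketch of the primary/secondary hash flip used repeatedly in
 * moea64_pte_insert_native() above. Slots are numbered linearly with eight
 * per PTEG, so a group's base slot is (group << 3), and XORing the slot with
 * (moea64_pteg_mask << 3) toggles between the two candidate groups; XOR is
 * its own inverse, which is why the function can simply flip again to retry
 * the original group. The 4096-PTEG table and PTEG index 1234 below are
 * hypothetical values for illustration.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t pteg_mask = (1ULL << 12) - 1;	/* 4096 PTEGs */
	uint64_t slot = (uint64_t)1234 << 3;	/* base slot of PTEG 1234 */
	uint64_t sec, back;

	sec = slot ^ (pteg_mask << 3);		/* jump to the secondary group */
	back = sec ^ (pteg_mask << 3);		/* ...and back again */

	assert((sec >> 3) == (1234 ^ 0xfff));	/* secondary PTEG index */
	assert(back == slot);			/* XOR undoes itself */
	return (0);
}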