diff --git a/sys/powerpc/aim/mp_cpudep.c b/sys/powerpc/aim/mp_cpudep.c index cb3856cc4bf0..a91778fa09b7 100644 --- a/sys/powerpc/aim/mp_cpudep.c +++ b/sys/powerpc/aim/mp_cpudep.c @@ -1,428 +1,428 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void *ap_pcpu; static register_t bsp_state[8] __aligned(8); static void cpudep_save_config(void *dummy); SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL); void cpudep_ap_early_bootstrap(void) { #ifndef __powerpc64__ register_t reg; #endif switch (mfpvr() >> 16) { case IBM970: case IBM970FX: case IBM970MP: /* Set HIOR to 0 */ __asm __volatile("mtspr 311,%0" :: "r"(0)); powerpc_sync(); /* Restore HID4 and HID5, which are necessary for the MMU */ #ifdef __powerpc64__ mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync(); mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync(); #else __asm __volatile("ld %0, 16(%2); sync; isync; \ mtspr %1, %0; sync; isync;" : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state)); __asm __volatile("ld %0, 24(%2); sync; isync; \ mtspr %1, %0; sync; isync;" : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state)); #endif powerpc_sync(); break; case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: case IBMPOWER9: #ifdef __powerpc64__ if (mfmsr() & PSL_HV) { isync(); /* * Direct interrupts to SRR instead of HSRR and * reset LPCR otherwise */ mtspr(SPR_LPID, 0); isync(); mtspr(SPR_LPCR, lpcr); isync(); /* * Nuke FSCR, to be managed on a per-process basis * later. 
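 *
 * Which branch of this switch (and of every other per-CPU switch in
 * this file) runs is decided by the PVR version field, read with the
 * mfpvr() >> 16 idiom. A minimal decoding sketch -- decode_pvr() is a
 * hypothetical helper taking the raw PVR word as an argument instead
 * of reading it with mfpvr():
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void
 *	decode_pvr(uint32_t pvr)
 *	{
 *		uint16_t vers = pvr >> 16;	// compared against IBM970, MPC7450, ...
 *		uint16_t rev = pvr & 0xffff;	// printed per the REVFMT_* formats in cpu.c below
 *
 *		printf("version 0x%04x revision 0x%04x\n", vers, rev);
 *	}
 *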
*/ mtspr(SPR_FSCR, 0); } #endif break; } __asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu)); powerpc_sync(); } uintptr_t cpudep_ap_bootstrap(void) { register_t msr, sp; msr = psl_kernset & ~PSL_EE; mtmsr(msr); pcpup->pc_curthread = pcpup->pc_idlethread; #ifdef __powerpc64__ __asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread)); #else __asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread)); #endif pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb; sp = pcpup->pc_curpcb->pcb_sp; schedinit_ap(); return (sp); } static register_t mpc74xx_l2_enable(register_t l2cr_config) { register_t ccr, bit; uint16_t vers; vers = mfpvr() >> 16; switch (vers) { case MPC7400: case MPC7410: bit = L2CR_L2IP; break; default: bit = L2CR_L2I; break; } ccr = mfspr(SPR_L2CR); if (ccr & L2CR_L2E) return (ccr); /* Configure L2 cache. */ ccr = l2cr_config & ~L2CR_L2E; mtspr(SPR_L2CR, ccr | L2CR_L2I); do { ccr = mfspr(SPR_L2CR); } while (ccr & bit); powerpc_sync(); mtspr(SPR_L2CR, l2cr_config); powerpc_sync(); return (l2cr_config); } static register_t mpc745x_l3_enable(register_t l3cr_config) { register_t ccr; ccr = mfspr(SPR_L3CR); if (ccr & L3CR_L3E) return (ccr); /* Configure L3 cache. */ ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN); mtspr(SPR_L3CR, ccr); ccr |= 0x4000000; /* Magic, but documented. */ mtspr(SPR_L3CR, ccr); ccr |= L3CR_L3CLKEN; mtspr(SPR_L3CR, ccr); mtspr(SPR_L3CR, ccr | L3CR_L3I); while (mfspr(SPR_L3CR) & L3CR_L3I) ; mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN); powerpc_sync(); DELAY(100); mtspr(SPR_L3CR, ccr); powerpc_sync(); DELAY(100); ccr |= L3CR_L3E; mtspr(SPR_L3CR, ccr); powerpc_sync(); return(ccr); } static register_t mpc74xx_l1d_enable(void) { register_t hid; hid = mfspr(SPR_HID0); if (hid & HID0_DCE) return (hid); /* Enable L1 D-cache */ hid |= HID0_DCE; powerpc_sync(); mtspr(SPR_HID0, hid | HID0_DCFI); powerpc_sync(); return (hid); } static register_t mpc74xx_l1i_enable(void) { register_t hid; hid = mfspr(SPR_HID0); if (hid & HID0_ICE) return (hid); /* Enable L1 I-cache */ hid |= HID0_ICE; isync(); mtspr(SPR_HID0, hid | HID0_ICFI); isync(); return (hid); } static void cpudep_save_config(void *dummy) { uint16_t vers; vers = mfpvr() >> 16; switch(vers) { case IBM970: case IBM970FX: case IBM970MP: #ifdef __powerpc64__ bsp_state[0] = mfspr(SPR_HID0); bsp_state[1] = mfspr(SPR_HID1); bsp_state[2] = mfspr(SPR_HID4); bsp_state[3] = mfspr(SPR_HID5); #else __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32" : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0)); __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32" : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1)); __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32" : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4)); __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32" : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5)); #endif powerpc_sync(); break; case IBMCELLBE: #ifdef NOTYET /* Causes problems if in instruction stream on 970 */ if (mfmsr() & PSL_HV) { bsp_state[0] = mfspr(SPR_HID0); bsp_state[1] = mfspr(SPR_HID1); bsp_state[2] = mfspr(SPR_HID4); bsp_state[3] = mfspr(SPR_HID6); bsp_state[4] = mfspr(SPR_CELL_TSCR); } #endif bsp_state[5] = mfspr(SPR_CELL_TSRL); break; case MPC7450: case MPC7455: case MPC7457: /* Only MPC745x CPUs have an L3 cache. 
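 *
 * The enable helpers above (mpc74xx_l2_enable() and mpc745x_l3_enable())
 * share one handshake: trigger a hardware invalidate, spin until the
 * status bit clears, and only then commit the final configuration with
 * the enable bit set. A condensed sketch of that handshake, assuming
 * hypothetical spr_read()/spr_write() accessors in place of
 * mfspr()/mtspr() (note the real L2 helper polls L2CR_L2IP rather than
 * L2CR_L2I on the MPC7400/7410):
 *
 *	#include <stdint.h>
 *
 *	extern uint32_t spr_read(int spr);		// stand-in for mfspr()
 *	extern void spr_write(int spr, uint32_t v);	// stand-in for mtspr()
 *
 *	static uint32_t
 *	cache_enable(int spr, uint32_t cfg, uint32_t enable, uint32_t inval)
 *	{
 *		uint32_t ccr = spr_read(spr);
 *
 *		if (ccr & enable)		// already enabled: leave it alone
 *			return (ccr);
 *		spr_write(spr, (cfg & ~enable) | inval); // start the invalidate
 *		while (spr_read(spr) & inval)	// wait for the hardware to finish
 *			;
 *		spr_write(spr, cfg);		// commit config, enable bit set
 *		return (cfg);
 *	}
 *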
*/ bsp_state[3] = mfspr(SPR_L3CR); /* Fallthrough */ case MPC7400: case MPC7410: case MPC7447A: case MPC7448: bsp_state[2] = mfspr(SPR_L2CR); bsp_state[1] = mfspr(SPR_HID1); bsp_state[0] = mfspr(SPR_HID0); break; } } void -cpudep_ap_setup() +cpudep_ap_setup(void) { #ifndef __powerpc64__ register_t reg; #endif uint16_t vers; vers = mfpvr() >> 16; switch(vers) { case IBM970: case IBM970FX: case IBM970MP: /* * The 970 has strange rules about how to update HID registers. * See Table 2-3, 970MP manual * * Note: HID4 and HID5 restored already in * cpudep_ap_early_bootstrap() */ __asm __volatile("mtasr %0; sync" :: "r"(0)); #ifdef __powerpc64__ __asm __volatile(" \ sync; isync; \ mtspr %1, %0; \ mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \ mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \ sync; isync" :: "r"(bsp_state[0]), "K"(SPR_HID0)); __asm __volatile("sync; isync; \ mtspr %1, %0; mtspr %1, %0; sync; isync" :: "r"(bsp_state[1]), "K"(SPR_HID1)); #else __asm __volatile(" \ ld %0,0(%2); \ sync; isync; \ mtspr %1, %0; \ mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \ mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \ sync; isync" : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state)); __asm __volatile("ld %0, 8(%2); sync; isync; \ mtspr %1, %0; mtspr %1, %0; sync; isync" : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state)); #endif powerpc_sync(); break; case IBMCELLBE: #ifdef NOTYET /* Causes problems if in instruction stream on 970 */ if (mfmsr() & PSL_HV) { mtspr(SPR_HID0, bsp_state[0]); mtspr(SPR_HID1, bsp_state[1]); mtspr(SPR_HID4, bsp_state[2]); mtspr(SPR_HID6, bsp_state[3]); mtspr(SPR_CELL_TSCR, bsp_state[4]); } #endif mtspr(SPR_CELL_TSRL, bsp_state[5]); break; case MPC7400: case MPC7410: case MPC7447A: case MPC7448: case MPC7450: case MPC7455: case MPC7457: /* XXX: Program the CPU ID into PIR */ __asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid))); powerpc_sync(); isync(); mtspr(SPR_HID0, bsp_state[0]); isync(); mtspr(SPR_HID1, bsp_state[1]); isync(); /* Now enable the L3 cache. */ switch (vers) { case MPC7450: case MPC7455: case MPC7457: /* Only MPC745x CPUs have an L3 cache. */ mpc745x_l3_enable(bsp_state[3]); default: break; } mpc74xx_l2_enable(bsp_state[2]); mpc74xx_l1d_enable(); mpc74xx_l1i_enable(); break; case IBMPOWER7: case IBMPOWER7PLUS: case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: case IBMPOWER9: #ifdef __powerpc64__ if (mfmsr() & PSL_HV) { mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr | LPCR_PECE_WAKESET); isync(); } #endif break; default: #ifdef __powerpc64__ if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */ break; #endif printf("WARNING: Unknown CPU type. Cache performance may be " "suboptimal.\n"); break; } } diff --git a/sys/powerpc/ofw/ofw_machdep.c b/sys/powerpc/ofw/ofw_machdep.c index 805d19cc2b42..24b89f9dd00d 100644 --- a/sys/powerpc/ofw/ofw_machdep.c +++ b/sys/powerpc/ofw/ofw_machdep.c @@ -1,875 +1,875 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1996 Wolfgang Solfrank. * Copyright (C) 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: ofw_machdep.c,v 1.5 2000/05/23 13:25:43 tsubai Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef POWERNV #include #endif static void *fdt; int ofw_real_mode; #ifdef AIM extern register_t ofmsr[5]; extern void *openfirmware_entry; char save_trap_init[0x2f00]; /* EXC_LAST */ char save_trap_of[0x2f00]; /* EXC_LAST */ int ofwcall(void *); static int openfirmware(void *args); #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wfortify-source" __inline void ofw_save_trap_vec(char *save_trap_vec) { if (!ofw_real_mode || !hw_direct_map) return; bcopy((void *)PHYS_TO_DMAP(EXC_RST), save_trap_vec, EXC_LAST - EXC_RST); } static __inline void ofw_restore_trap_vec(char *restore_trap_vec) { if (!ofw_real_mode || !hw_direct_map) return; bcopy(restore_trap_vec, (void *)PHYS_TO_DMAP(EXC_RST), EXC_LAST - EXC_RST); __syncicache((void *)PHYS_TO_DMAP(EXC_RSVD), EXC_LAST - EXC_RSVD); } #pragma clang diagnostic pop /* * Saved SPRG0-3 from OpenFirmware. Will be restored prior to the callback. */ register_t ofw_sprg0_save; static __inline void ofw_sprg_prepare(void) { if (ofw_real_mode) return; /* * Assume that interrupts are disabled at this point, or * SPRG1-3 could be trashed */ #ifdef __powerpc64__ __asm __volatile("mtsprg1 %0\n\t" "mtsprg2 %1\n\t" "mtsprg3 %2\n\t" : : "r"(ofmsr[2]), "r"(ofmsr[3]), "r"(ofmsr[4])); #else __asm __volatile("mfsprg0 %0\n\t" "mtsprg0 %1\n\t" "mtsprg1 %2\n\t" "mtsprg2 %3\n\t" "mtsprg3 %4\n\t" : "=&r"(ofw_sprg0_save) : "r"(ofmsr[1]), "r"(ofmsr[2]), "r"(ofmsr[3]), "r"(ofmsr[4])); #endif } static __inline void ofw_sprg_restore(void) { if (ofw_real_mode) return; /* * Note that SPRG1-3 contents are irrelevant. They are scratch * registers used in the early portion of trap handling when * interrupts are disabled. * * PCPU data cannot be used until this routine is called! */ #ifndef __powerpc64__ __asm __volatile("mtsprg0 %0" :: "r"(ofw_sprg0_save)); #endif } #endif static int parse_ofw_memory(phandle_t node, const char *prop, struct mem_region *output) { cell_t address_cells, size_cells; cell_t OFmem[4 * PHYS_AVAIL_SZ]; int sz, i, j; phandle_t phandle; sz = 0; /* * Get #address-cells from root node, defaulting to 1 if it cannot * be found. 
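 *
 * The two cell counts fetched here decide how many 32-bit cells make
 * up each address and each size in the "reg" stream decoded below.
 * A minimal sketch of that decoding step -- read_cells() is a
 * hypothetical helper; parse_ofw_memory() open-codes the same thing:
 *
 *	#include <stdint.h>
 *
 *	typedef uint32_t cell_t;
 *
 *	static uint64_t
 *	read_cells(const cell_t *buf, int *i, cell_t ncells)
 *	{
 *		uint64_t v = buf[(*i)++];	// first (or only) cell
 *
 *		if (ncells == 2) {		// 64-bit value: high cell, then low
 *			v <<= 32;
 *			v += buf[(*i)++];
 *		}
 *		return (v);
 *	}
 *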
*/ phandle = OF_finddevice("/"); if (OF_getencprop(phandle, "#address-cells", &address_cells, sizeof(address_cells)) < (ssize_t)sizeof(address_cells)) address_cells = 1; if (OF_getencprop(phandle, "#size-cells", &size_cells, sizeof(size_cells)) < (ssize_t)sizeof(size_cells)) size_cells = 1; /* * Get memory. */ if (node == -1 || (sz = OF_getencprop(node, prop, OFmem, sizeof(OFmem))) <= 0) panic("Physical memory map not found"); i = 0; j = 0; while (i < sz/sizeof(cell_t)) { output[j].mr_start = OFmem[i++]; if (address_cells == 2) { output[j].mr_start <<= 32; output[j].mr_start += OFmem[i++]; } output[j].mr_size = OFmem[i++]; if (size_cells == 2) { output[j].mr_size <<= 32; output[j].mr_size += OFmem[i++]; } if (output[j].mr_start > BUS_SPACE_MAXADDR) continue; /* * Constrain memory to that which we can access. * 32-bit AIM can only reference 32 bits of address currently, * but Book-E can access 36 bits. */ if (((uint64_t)output[j].mr_start + (uint64_t)output[j].mr_size - 1) > BUS_SPACE_MAXADDR) { output[j].mr_size = BUS_SPACE_MAXADDR - output[j].mr_start + 1; } j++; } return (j); } static int parse_numa_ofw_memory(phandle_t node, const char *prop, struct numa_mem_region *output) { cell_t address_cells, size_cells; cell_t OFmem[4 * PHYS_AVAIL_SZ]; int sz, i, j; phandle_t phandle; sz = 0; /* * Get #address-cells from root node, defaulting to 1 if it cannot * be found. */ phandle = OF_finddevice("/"); if (OF_getencprop(phandle, "#address-cells", &address_cells, sizeof(address_cells)) < (ssize_t)sizeof(address_cells)) address_cells = 1; if (OF_getencprop(phandle, "#size-cells", &size_cells, sizeof(size_cells)) < (ssize_t)sizeof(size_cells)) size_cells = 1; /* * Get memory. */ if (node == -1 || (sz = OF_getencprop(node, prop, OFmem, sizeof(OFmem))) <= 0) panic("Physical memory map not found"); i = 0; j = 0; while (i < sz/sizeof(cell_t)) { output[j].mr_start = OFmem[i++]; if (address_cells == 2) { output[j].mr_start <<= 32; output[j].mr_start += OFmem[i++]; } output[j].mr_size = OFmem[i++]; if (size_cells == 2) { output[j].mr_size <<= 32; output[j].mr_size += OFmem[i++]; } j++; } return (j); } #ifdef FDT static int excise_reserved_regions(struct mem_region *avail, int asz, struct mem_region *exclude, int esz) { int i, j, k; for (i = 0; i < asz; i++) { for (j = 0; j < esz; j++) { /* * Case 1: Exclusion region encloses complete * available entry. Drop it and move on. */ if (exclude[j].mr_start <= avail[i].mr_start && exclude[j].mr_start + exclude[j].mr_size >= avail[i].mr_start + avail[i].mr_size) { for (k = i+1; k < asz; k++) avail[k-1] = avail[k]; asz--; i--; /* Repeat some entries */ continue; } /* * Case 2: Exclusion region starts in available entry. * Trim it to where the entry begins and append * a new available entry with the region after * the excluded region, if any. */ if (exclude[j].mr_start >= avail[i].mr_start && exclude[j].mr_start < avail[i].mr_start + avail[i].mr_size) { if (exclude[j].mr_start + exclude[j].mr_size < avail[i].mr_start + avail[i].mr_size) { avail[asz].mr_start = exclude[j].mr_start + exclude[j].mr_size; avail[asz].mr_size = avail[i].mr_start + avail[i].mr_size - avail[asz].mr_start; asz++; } avail[i].mr_size = exclude[j].mr_start - avail[i].mr_start; } /* * Case 3: Exclusion region ends in available entry. * Move start point to where the exclusion zone ends. * The case of a contained exclusion zone has already * been caught in case 2. 
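 *
 * To make the interval arithmetic concrete, a worked example of
 * case 2 with hypothetical numbers (a standalone demo, not kernel
 * code):
 *
 *	#include <inttypes.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct mr { uint64_t start, size; };
 *
 *	int
 *	main(void)
 *	{
 *		struct mr avail = { 0x1000, 0x8000 };	// [0x1000, 0x9000)
 *		struct mr excl = { 0x3000, 0x2000 };	// [0x3000, 0x5000)
 *		struct mr tail;
 *
 *		// The exclusion starts inside the entry and ends before the
 *		// entry does, so the entry is trimmed and a tail appended.
 *		tail.start = excl.start + excl.size;		   // 0x5000
 *		tail.size = avail.start + avail.size - tail.start; // 0x4000
 *		avail.size = excl.start - avail.start;		   // 0x2000
 *		printf("head [%#" PRIx64 ", +%#" PRIx64 ") "
 *		    "tail [%#" PRIx64 ", +%#" PRIx64 ")\n",
 *		    avail.start, avail.size, tail.start, tail.size);
 *		return (0);
 *	}
 *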
*/ if (exclude[j].mr_start + exclude[j].mr_size >= avail[i].mr_start && exclude[j].mr_start + exclude[j].mr_size < avail[i].mr_start + avail[i].mr_size) { avail[i].mr_size += avail[i].mr_start; avail[i].mr_start = exclude[j].mr_start + exclude[j].mr_size; avail[i].mr_size -= avail[i].mr_start; } } } return (asz); } static int excise_initrd_region(struct mem_region *avail, int asz) { phandle_t chosen; uint64_t start, end; ssize_t size; struct mem_region initrdmap[1]; pcell_t cell[2]; chosen = OF_finddevice("/chosen"); size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell)); if (size < 0) return (asz); else if (size == 4) start = cell[0]; else if (size == 8) start = (uint64_t)cell[0] << 32 | cell[1]; else { /* Invalid value length */ printf("WARNING: linux,initrd-start must be either 4 or 8 bytes long\n"); return (asz); } size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell)); if (size < 0) return (asz); else if (size == 4) end = cell[0]; else if (size == 8) end = (uint64_t)cell[0] << 32 | cell[1]; else { /* Invalid value length */ printf("WARNING: linux,initrd-end must be either 4 or 8 bytes long\n"); return (asz); } if (end <= start) return (asz); initrdmap[0].mr_start = start; initrdmap[0].mr_size = end - start; asz = excise_reserved_regions(avail, asz, initrdmap, 1); return (asz); } #ifdef POWERNV static int excise_msi_region(struct mem_region *avail, int asz) { uint64_t start, end; struct mem_region initrdmap[1]; /* * This range of physical addresses is used to implement optimized * 32 bit MSI interrupts on POWER9. Exclude it to avoid accidentally * using it for DMA, as this will cause an immediate PHB fence. * While we could theoretically turn off this behavior in the ETU, * doing so would break 32-bit MSI, so just reserve the range in * the physical map instead. * See section 4.4.2.8 of the PHB4 specification. */ start = 0x00000000ffff0000ul; end = 0x00000000fffffffful; initrdmap[0].mr_start = start; initrdmap[0].mr_size = end - start; asz = excise_reserved_regions(avail, asz, initrdmap, 1); return (asz); } #endif static int excise_fdt_reserved(struct mem_region *avail, int asz) { struct mem_region fdtmap[32]; ssize_t fdtmapsize; phandle_t chosen; int j, fdtentries; chosen = OF_finddevice("/chosen"); fdtmapsize = OF_getprop(chosen, "fdtmemreserv", fdtmap, sizeof(fdtmap)); for (j = 0; j < fdtmapsize/sizeof(fdtmap[0]); j++) { fdtmap[j].mr_start = be64toh(fdtmap[j].mr_start) & ~PAGE_MASK; fdtmap[j].mr_size = round_page(be64toh(fdtmap[j].mr_size)); } KASSERT(j*sizeof(fdtmap[0]) < sizeof(fdtmap), ("Exceeded number of FDT reservations")); /* Add a virtual entry for the FDT itself */ if (fdt != NULL) { fdtmap[j].mr_start = (vm_offset_t)fdt & ~PAGE_MASK; fdtmap[j].mr_size = round_page(fdt_totalsize(fdt)); fdtmapsize += sizeof(fdtmap[0]); } fdtentries = fdtmapsize/sizeof(fdtmap[0]); asz = excise_reserved_regions(avail, asz, fdtmap, fdtentries); return (asz); } #endif /* * This is called during powerpc_init, before the system is really initialized. * It shall provide the total and the available regions of RAM. * The available regions need not take the kernel into account. */ void ofw_numa_mem_regions(struct numa_mem_region *memp, int *memsz) { phandle_t phandle; int count, msz; char name[31]; struct numa_mem_region *curmemp; msz = 0; /* * Get memory from all the /memory nodes. 
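 *
 * (excise_fdt_reserved() above widens each reservation to whole pages
 * before excising it: the start is rounded down and the size rounded
 * up. A sketch of that normalization, assuming 4 KiB pages and
 * leaving out the be64toh() byte swap:
 *
 *	#include <stdint.h>
 *
 *	#define PAGE_SIZE 4096ULL
 *	#define PAGE_MASK (PAGE_SIZE - 1)
 *
 *	static void
 *	page_align_resv(uint64_t *start, uint64_t *size)
 *	{
 *		*start &= ~PAGE_MASK;			  // round start down
 *		*size = (*size + PAGE_MASK) & ~PAGE_MASK; // round size up
 *	}
 *
 * This mirrors the mr_start &= ~PAGE_MASK and round_page(mr_size)
 * pair in that function.)
 *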
*/ for (phandle = OF_child(OF_peer(0)); phandle != 0; phandle = OF_peer(phandle)) { if (OF_getprop(phandle, "name", name, sizeof(name)) <= 0) continue; if (strncmp(name, "memory@", strlen("memory@")) != 0) continue; count = parse_numa_ofw_memory(phandle, "reg", &memp[msz]); if (count == 0) continue; curmemp = &memp[msz]; MPASS(count == 1); curmemp->mr_domain = platform_node_numa_domain(phandle); if (bootverbose) printf("%s %#jx-%#jx domain(%ju)\n", name, (uintmax_t)curmemp->mr_start, (uintmax_t)curmemp->mr_start + curmemp->mr_size, (uintmax_t)curmemp->mr_domain); msz += count; } *memsz = msz; } /* * This is called during powerpc_init, before the system is really initialized. * It shall provide the total and the available regions of RAM. * The available regions need not take the kernel into account. */ void ofw_mem_regions(struct mem_region *memp, int *memsz, struct mem_region *availp, int *availsz) { phandle_t phandle; int asz, msz; int res; char name[31]; asz = msz = 0; /* * Get memory from all the /memory nodes. */ for (phandle = OF_child(OF_peer(0)); phandle != 0; phandle = OF_peer(phandle)) { if (OF_getprop(phandle, "name", name, sizeof(name)) <= 0) continue; if (strncmp(name, "memory", sizeof(name)) != 0 && strncmp(name, "memory@", strlen("memory@")) != 0) continue; res = parse_ofw_memory(phandle, "reg", &memp[msz]); msz += res; /* * On POWER9 systems we might have both linux,usable-memory and * reg properties. 'reg' denotes all available memory, but we * must use 'linux,usable-memory', a subset, as some memory * regions are reserved for NVLink. */ if (OF_getproplen(phandle, "linux,usable-memory") >= 0) res = parse_ofw_memory(phandle, "linux,usable-memory", &availp[asz]); else if (OF_getproplen(phandle, "available") >= 0) res = parse_ofw_memory(phandle, "available", &availp[asz]); else res = parse_ofw_memory(phandle, "reg", &availp[asz]); asz += res; } #ifdef FDT phandle = OF_finddevice("/chosen"); if (OF_hasprop(phandle, "fdtmemreserv")) asz = excise_fdt_reserved(availp, asz); /* If the kernel is being loaded through kexec, the initrd region is listed * in /chosen but the region is not marked as reserved, so we must exclude * it here. */ if (OF_hasprop(phandle, "linux,initrd-start")) asz = excise_initrd_region(availp, asz); #endif #ifdef POWERNV if (opal_check() == 0) asz = excise_msi_region(availp, asz); #endif *memsz = msz; *availsz = asz; } void OF_initial_setup(void *fdt_ptr, void *junk, int (*openfirm)(void *)) { #ifdef AIM ofmsr[0] = mfmsr(); #ifdef __powerpc64__ ofmsr[0] &= ~PSL_SF; #ifdef __LITTLE_ENDIAN__ /* Assume OFW is BE. 
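 *
 * A little-endian 64-bit kernel must drop both PSL_SF (64-bit mode)
 * and PSL_LE (little-endian mode) from the MSR it hands to the
 * firmware, since classic Open Firmware runs 32-bit big-endian.
 * Illustrative sketch -- ofw_entry_msr() is hypothetical; the bit
 * values follow the PowerISA MSR layout the kernel gets from
 * <machine/psl.h>:
 *
 *	#include <stdint.h>
 *
 *	#define PSL_SF (1ULL << 63)	// sixty-four-bit mode
 *	#define PSL_LE (1ULL << 0)	// little-endian mode
 *
 *	static uint64_t
 *	ofw_entry_msr(uint64_t kern_msr)
 *	{
 *		return (kern_msr & ~(PSL_SF | PSL_LE));
 *	}
 *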
*/ ofmsr[0] &= ~PSL_LE; #endif #else __asm __volatile("mfsprg0 %0" : "=&r"(ofmsr[1])); #endif __asm __volatile("mfsprg1 %0" : "=&r"(ofmsr[2])); __asm __volatile("mfsprg2 %0" : "=&r"(ofmsr[3])); __asm __volatile("mfsprg3 %0" : "=&r"(ofmsr[4])); openfirmware_entry = openfirm; if (ofmsr[0] & PSL_DR) ofw_real_mode = 0; else ofw_real_mode = 1; ofw_save_trap_vec(save_trap_init); #else ofw_real_mode = 1; #endif fdt = fdt_ptr; } boolean_t -OF_bootstrap() +OF_bootstrap(void) { boolean_t status = FALSE; int err = 0; #ifdef AIM if (openfirmware_entry != NULL) { if (ofw_real_mode) { status = OF_install(OFW_STD_REAL, 0); } else { #ifdef __powerpc64__ status = OF_install(OFW_STD_32BIT, 0); #else status = OF_install(OFW_STD_DIRECT, 0); #endif } if (status != TRUE) return status; err = OF_init(openfirmware); } else #endif if (fdt != NULL) { #ifdef FDT #ifdef AIM bus_space_tag_t fdt_bt; vm_offset_t tmp_fdt_ptr; vm_size_t fdt_size; uintptr_t fdt_va; #endif status = OF_install(OFW_FDT, 0); if (status != TRUE) return status; #ifdef AIM /* AIM-only for now -- Book-E does this remapping in early init */ /* Get the FDT size for mapping if we can */ tmp_fdt_ptr = pmap_early_io_map((vm_paddr_t)fdt, PAGE_SIZE); if (fdt_check_header((void *)tmp_fdt_ptr) != 0) { pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE); return FALSE; } fdt_size = fdt_totalsize((void *)tmp_fdt_ptr); pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE); /* * Map this for real. Use bus_space_map() to take advantage * of its auto-remapping function once the kernel is loaded. * This is a dirty hack, but what we have. */ #ifdef __LITTLE_ENDIAN__ fdt_bt = &bs_le_tag; #else fdt_bt = &bs_be_tag; #endif bus_space_map(fdt_bt, (vm_paddr_t)fdt, fdt_size, 0, &fdt_va); err = OF_init((void *)fdt_va); #else err = OF_init(fdt); #endif #endif } #ifdef FDT_DTB_STATIC /* * Check for a statically included blob already in the kernel and * needing no mapping. */ else { status = OF_install(OFW_FDT, 0); if (status != TRUE) return status; err = OF_init(&fdt_static_dtb); } #endif if (err != 0) { OF_install(NULL, 0); status = FALSE; } return (status); } #ifdef AIM void ofw_quiesce(void) { struct { cell_t name; cell_t nargs; cell_t nreturns; } args; KASSERT(!pmap_bootstrapped, ("Cannot call ofw_quiesce after VM is up")); args.name = (cell_t)(uintptr_t)"quiesce"; args.nargs = 0; args.nreturns = 0; openfirmware(&args); } static int openfirmware_core(void *args) { int result; register_t oldmsr; if (openfirmware_entry == NULL) return (-1); /* * Turn off exceptions - we really don't want to end up * anywhere unexpected with PCPU set to something strange * or the stack pointer wrong. 
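 *
 * openfirmware() further below funnels every firmware call through one
 * CPU: the others spin with interrupts off until the call completes,
 * so no CPU can take a trap while the firmware owns the trap vectors.
 * A minimal sketch of that funnel using C11 atomics -- dispatch() and
 * do_call() are hypothetical stand-ins for ofw_rendezvous_dispatch()
 * and openfirmware_core():
 *
 *	#include <stdatomic.h>
 *
 *	struct rv {
 *		void *args;
 *		int retval;
 *		atomic_int in_progress;	// set to 1 by the caller beforehand
 *	};
 *
 *	extern int do_call(void *args);
 *
 *	static void
 *	dispatch(struct rv *rv, int cpuid)
 *	{
 *		if (cpuid == 0) {		// one CPU makes the call
 *			rv->retval = do_call(rv->args);
 *			atomic_store(&rv->in_progress, 0);
 *		} else {			// everyone else waits it out
 *			while (atomic_load(&rv->in_progress))
 *				;		// cpu_spinwait() in the kernel
 *		}
 *	}
 *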
*/ oldmsr = intr_disable(); ofw_sprg_prepare(); /* Save trap vectors */ ofw_save_trap_vec(save_trap_of); /* Restore initially saved trap vectors */ ofw_restore_trap_vec(save_trap_init); #ifndef __powerpc64__ /* * Clear battable[] translations */ if (!(cpu_features & PPC_FEATURE_64)) __asm __volatile("mtdbatu 2, %0\n" "mtdbatu 3, %0" : : "r" (0)); isync(); #endif result = ofwcall(args); /* Restore trap vectors */ ofw_restore_trap_vec(save_trap_of); ofw_sprg_restore(); intr_restore(oldmsr); return (result); } #ifdef SMP struct ofw_rv_args { void *args; int retval; volatile int in_progress; }; static void ofw_rendezvous_dispatch(void *xargs) { struct ofw_rv_args *rv_args = xargs; /* NOTE: Interrupts are disabled here */ if (PCPU_GET(cpuid) == 0) { /* * Execute all OF calls on CPU 0 */ rv_args->retval = openfirmware_core(rv_args->args); rv_args->in_progress = 0; } else { /* * Spin with interrupts off on other CPUs while OF has * control of the machine. */ while (rv_args->in_progress) cpu_spinwait(); } } #endif static int openfirmware(void *args) { int result; #ifdef SMP struct ofw_rv_args rv_args; #endif if (openfirmware_entry == NULL) return (-1); #ifdef SMP if (cold) { result = openfirmware_core(args); } else { rv_args.args = args; rv_args.in_progress = 1; smp_rendezvous(smp_no_rendezvous_barrier, ofw_rendezvous_dispatch, smp_no_rendezvous_barrier, &rv_args); result = rv_args.retval; } #else result = openfirmware_core(args); #endif return (result); } void -OF_reboot() +OF_reboot(void) { struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t arg; } args; args.name = (cell_t)(uintptr_t)"interpret"; args.nargs = 1; args.nreturns = 0; args.arg = (cell_t)(uintptr_t)"reset-all"; openfirmware_core(&args); /* Don't do rendezvous! */ for (;;); /* just in case */ } #endif /* AIM */ void OF_getetheraddr(device_t dev, u_char *addr) { phandle_t node; node = ofw_bus_get_node(dev); OF_getprop(node, "local-mac-address", addr, ETHER_ADDR_LEN); } /* * Return a bus handle and bus tag that corresponds to the register * numbered regno for the device referenced by the package handle * dev. This function is intended to be used by console drivers in * early boot only. It works by mapping the address of the device's * register in the address space of its parent and recursively walking * the device tree upward this way. */ int OF_decode_addr(phandle_t dev, int regno, bus_space_tag_t *tag, bus_space_handle_t *handle, bus_size_t *sz) { bus_addr_t addr; bus_size_t size; pcell_t pci_hi; int flags, res; res = ofw_reg_to_paddr(dev, regno, &addr, &size, &pci_hi); if (res < 0) return (res); if (pci_hi == OFW_PADDR_NOT_PCI) { *tag = &bs_be_tag; flags = 0; } else { *tag = &bs_le_tag; flags = (pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) ? BUS_SPACE_MAP_PREFETCHABLE: 0; } if (sz != NULL) *sz = size; return (bus_space_map(*tag, addr, size, flags, handle)); } diff --git a/sys/powerpc/powerpc/cpu.c b/sys/powerpc/powerpc/cpu.c index 300a738f796d..5e267c37a7db 100644 --- a/sys/powerpc/powerpc/cpu.c +++ b/sys/powerpc/powerpc/cpu.c @@ -1,847 +1,847 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Matt Thomas. * Copyright (c) 2001 Tsubai Masanari. * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by * Internet Research Institute, Inc. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2003 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * from $NetBSD: cpu_subr.c,v 1.1 2003/02/03 17:10:09 matt Exp $ * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void cpu_6xx_setup(int cpuid, uint16_t vers); static void cpu_970_setup(int cpuid, uint16_t vers); static void cpu_booke_setup(int cpuid, uint16_t vers); static void cpu_powerx_setup(int cpuid, uint16_t vers); int powerpc_pow_enabled; void (*cpu_idle_hook)(sbintime_t) = NULL; static void cpu_idle_60x(sbintime_t); static void cpu_idle_booke(sbintime_t); #ifdef BOOKE_E500 static void cpu_idle_e500mc(sbintime_t sbt); #endif #if defined(__powerpc64__) && defined(AIM) static void cpu_idle_powerx(sbintime_t); static void cpu_idle_power9(sbintime_t); #endif struct cputab { const char *name; uint16_t version; uint16_t revfmt; int features; /* Do not include PPC_FEATURE_32 or * PPC_FEATURE_HAS_MMU */ int features2; void (*cpu_setup)(int cpuid, uint16_t vers); }; #define REVFMT_MAJMIN 1 /* %u.%u */ #define REVFMT_HEX 2 /* 0x%04x */ #define REVFMT_DEC 3 /* %u */ static const struct cputab models[] = { { "Motorola PowerPC 601", MPC601, REVFMT_DEC, PPC_FEATURE_HAS_FPU | PPC_FEATURE_UNIFIED_CACHE, 0, cpu_6xx_setup }, { "Motorola PowerPC 602", MPC602, REVFMT_DEC, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603", MPC603, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603e", MPC603e, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603ev", MPC603ev, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 604", MPC604, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 604ev", MPC604ev, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 620", MPC620, REVFMT_HEX, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU, 0, NULL }, { "Motorola PowerPC 750", MPC750, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "IBM PowerPC 750FX", IBM750FX, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "IBM PowerPC 970", IBM970, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970FX", IBM970FX, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970GX", IBM970GX, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970MP", IBM970MP, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM POWER4", IBMPOWER4, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4, 0, NULL }, { "IBM POWER4+", IBMPOWER4PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4, 0, NULL }, { "IBM POWER5", IBMPOWER5, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4 | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP, 0, NULL }, { "IBM POWER5+", IBMPOWER5PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP, 0, NULL }, { "IBM POWER6", IBMPOWER6, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_TRUE_LE, 0, NULL }, { "IBM POWER7", IBMPOWER7, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ARCH_2_05 | 
PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_DSCR, NULL }, { "IBM POWER7+", IBMPOWER7PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX, PPC_FEATURE2_DSCR, NULL }, { "IBM POWER8E", IBMPOWER8E, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER8NVL", IBMPOWER8NVL, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER8", IBMPOWER8, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER9", IBMPOWER9, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_EBB | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO | PPC_FEATURE2_ARCH_3_00 | PPC_FEATURE2_HAS_IEEE128 | PPC_FEATURE2_DARN, cpu_powerx_setup }, { "Motorola PowerPC 7400", MPC7400, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7410", MPC7410, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7450", MPC7450, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7455", MPC7455, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7457", MPC7457, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7447A", MPC7447A, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7448", MPC7448, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 8240", MPC8240, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 8245", MPC8245, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Freescale e500v1 core", FSL_E500v1, REVFMT_MAJMIN, PPC_FEATURE_HAS_SPE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_BOOKE, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e500v2 core", FSL_E500v2, REVFMT_MAJMIN, PPC_FEATURE_HAS_SPE | PPC_FEATURE_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e500mc core", FSL_E500mc, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e5500 core", FSL_E5500, REVFMT_MAJMIN, PPC_FEATURE_64 | 
PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e6500 core", FSL_E6500, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "IBM Cell Broadband Engine", IBMCELLBE, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_CELL | PPC_FEATURE_SMT, 0, NULL}, { "Unknown PowerPC CPU", 0, REVFMT_HEX, 0, 0, NULL }, }; static void cpu_6xx_print_cacheinfo(u_int, uint16_t); static int cpu_feature_bit(SYSCTL_HANDLER_ARGS); static char model[64]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, ""); static const struct cputab *cput; u_long cpu_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU; u_long cpu_features2 = 0; SYSCTL_OPAQUE(_hw, OID_AUTO, cpu_features, CTLFLAG_RD, &cpu_features, sizeof(cpu_features), "LX", "PowerPC CPU features"); SYSCTL_OPAQUE(_hw, OID_AUTO, cpu_features2, CTLFLAG_RD, &cpu_features2, sizeof(cpu_features2), "LX", "PowerPC CPU features 2"); #ifdef __powerpc64__ register_t lpcr = LPCR_LPES; #endif /* Provide some user-friendly aliases for bits in cpu_features */ SYSCTL_PROC(_hw, OID_AUTO, floatingpoint, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, PPC_FEATURE_HAS_FPU, cpu_feature_bit, "I", "Floating point instructions executed in hardware"); SYSCTL_PROC(_hw, OID_AUTO, altivec, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, PPC_FEATURE_HAS_ALTIVEC, cpu_feature_bit, "I", "CPU supports Altivec"); /* * Phase 1 (early) CPU setup. Setup the cpu_features/cpu_features2 variables, * so they can be used during platform and MMU bringup. */ void -cpu_feature_setup() +cpu_feature_setup(void) { u_int pvr; uint16_t vers; const struct cputab *cp; pvr = mfpvr(); vers = pvr >> 16; for (cp = models; cp->version != 0; cp++) { if (cp->version == vers) break; } cput = cp; cpu_features |= cp->features; cpu_features2 |= cp->features2; } void cpu_setup(u_int cpuid) { uint64_t cps; const char *name; u_int maj, min, pvr; uint16_t rev, revfmt, vers; pvr = mfpvr(); vers = pvr >> 16; rev = pvr; switch (vers) { case MPC7410: min = (pvr >> 0) & 0xff; maj = min <= 4 ? 1 : 2; break; case FSL_E500v1: case FSL_E500v2: case FSL_E500mc: case FSL_E5500: maj = (pvr >> 4) & 0xf; min = (pvr >> 0) & 0xf; break; default: maj = (pvr >> 8) & 0xf; min = (pvr >> 0) & 0xf; } revfmt = cput->revfmt; name = cput->name; if (rev == MPC750 && pvr == 15) { name = "Motorola MPC755"; revfmt = REVFMT_HEX; } strncpy(model, name, sizeof(model) - 1); printf("cpu%d: %s revision ", cpuid, name); switch (revfmt) { case REVFMT_MAJMIN: printf("%u.%u", maj, min); break; case REVFMT_HEX: printf("0x%04x", rev); break; case REVFMT_DEC: printf("%u", rev); break; } if (cpu_est_clockrate(0, &cps) == 0) printf(", %jd.%02jd MHz", cps / 1000000, (cps / 10000) % 100); printf("\n"); printf("cpu%d: Features %b\n", cpuid, (int)cpu_features, PPC_FEATURE_BITMASK); if (cpu_features2 != 0) printf("cpu%d: Features2 %b\n", cpuid, (int)cpu_features2, PPC_FEATURE2_BITMASK); /* * Configure CPU */ if (cput->cpu_setup != NULL) cput->cpu_setup(cpuid, vers); } /* Get current clock frequency for the given cpu id. 
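 *
 * On the CPUs handled explicitly below the rate is measured rather
 * than read from the device tree: a performance counter is frozen,
 * zeroed, pointed at processor cycles, allowed to run for one
 * millisecond, and then scaled to Hz. Condensed sketch with
 * hypothetical pmc_*()/delay_us() stand-ins for the
 * mtspr()/mfspr()/DELAY() sequences in the function:
 *
 *	#include <stdint.h>
 *
 *	extern void pmc_freeze(void);		// MMCR0: freeze counters
 *	extern void pmc_count_cycles(void);	// PMC1 = 0, select cycle events
 *	extern uint64_t pmc_read(void);		// read PMC1
 *	extern void delay_us(unsigned us);
 *
 *	static uint64_t
 *	cycles_per_sec(void)
 *	{
 *		uint64_t c;
 *
 *		pmc_freeze();
 *		pmc_count_cycles();
 *		delay_us(1000);			// 1 ms sample window
 *		c = pmc_read();
 *		pmc_freeze();
 *		return (c * 1000 + 4999);	// Hz, same rounding bias as below
 *	}
 *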
*/ int cpu_est_clockrate(int cpu_id, uint64_t *cps) { uint16_t vers; register_t msr; phandle_t cpu, dev, root; uint32_t freq32; int res = 0; char buf[8]; vers = mfpvr() >> 16; msr = mfmsr(); mtmsr(msr & ~PSL_EE); switch (vers) { case MPC7450: case MPC7455: case MPC7457: case MPC750: case IBM750FX: case MPC7400: case MPC7410: case MPC7447A: case MPC7448: mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC); mtspr(SPR_PMC1_74XX, 0); mtspr(SPR_MMCR0_74XX, SPR_MMCR0_74XX_PMC1SEL(PMCN_CYCLES)); DELAY(1000); *cps = (mfspr(SPR_PMC1_74XX) * 1000) + 4999; mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC); mtmsr(msr); return (0); case IBM970: case IBM970FX: case IBM970MP: isync(); mtspr(SPR_MMCR0, SPR_MMCR0_FC); isync(); mtspr(SPR_MMCR1, 0); mtspr(SPR_MMCRA, 0); mtspr(SPR_PMC1, 0); mtspr(SPR_MMCR0, SPR_MMCR0_PMC1SEL(PMC970N_CYCLES)); isync(); DELAY(1000); powerpc_sync(); mtspr(SPR_MMCR0, SPR_MMCR0_FC); *cps = (mfspr(SPR_PMC1) * 1000) + 4999; mtmsr(msr); return (0); default: root = OF_peer(0); if (root == 0) return (ENXIO); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } cpu = OF_child(dev); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); if (OF_getprop(cpu, "ibm,extended-clock-frequency", cps, sizeof(*cps)) >= 0) { *cps = be64toh(*cps); return (0); } else if (OF_getencprop(cpu, "clock-frequency", &freq32, sizeof(freq32)) >= 0) { *cps = freq32; return (0); } else { return (ENOENT); } } } void cpu_6xx_setup(int cpuid, uint16_t vers) { register_t hid0, pvr; const char *bitmask; hid0 = mfspr(SPR_HID0); pvr = mfpvr(); /* * Configure power-saving mode. */ switch (vers) { case MPC603: case MPC603e: case MPC603ev: case MPC604ev: case MPC750: case IBM750FX: case MPC7400: case MPC7410: case MPC8240: case MPC8245: /* Select DOZE mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_DOZE | HID0_DPM; powerpc_pow_enabled = 1; break; case MPC7448: case MPC7447A: case MPC7457: case MPC7455: case MPC7450: /* Enable the 7450 branch caches */ hid0 |= HID0_SGE | HID0_BTIC; hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT; /* Disable BTIC on 7450 Rev 2.0 or earlier and on 7457 */ if (((pvr >> 16) == MPC7450 && (pvr & 0xFFFF) <= 0x0200) || (pvr >> 16) == MPC7457) hid0 &= ~HID0_BTIC; /* Select NAP mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_NAP | HID0_DPM; powerpc_pow_enabled = 1; break; default: /* No power-saving mode is available. */ ; } switch (vers) { case IBM750FX: case MPC750: hid0 &= ~HID0_DBP; /* XXX correct? */ hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; break; case MPC7400: case MPC7410: hid0 &= ~HID0_SPD; hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; hid0 |= HID0_EIEC; break; } mtspr(SPR_HID0, hid0); if (bootverbose) cpu_6xx_print_cacheinfo(cpuid, vers); switch (vers) { case MPC7447A: case MPC7448: case MPC7450: case MPC7455: case MPC7457: bitmask = HID0_7450_BITMASK; break; default: bitmask = HID0_BITMASK; break; } printf("cpu%d: HID0 %b\n", cpuid, (int)hid0, bitmask); if (cpu_idle_hook == NULL) cpu_idle_hook = cpu_idle_60x; } static void cpu_6xx_print_cacheinfo(u_int cpuid, uint16_t vers) { register_t hid; hid = mfspr(SPR_HID0); printf("cpu%u: ", cpuid); printf("L1 I-cache %sabled, ", (hid & HID0_ICE) ? "en" : "dis"); printf("L1 D-cache %sabled\n", (hid & HID0_DCE) ? 
"en" : "dis"); printf("cpu%u: ", cpuid); if (mfspr(SPR_L2CR) & L2CR_L2E) { switch (vers) { case MPC7450: case MPC7455: case MPC7457: printf("256KB L2 cache, "); if (mfspr(SPR_L3CR) & L3CR_L3E) printf("%cMB L3 backside cache", mfspr(SPR_L3CR) & L3CR_L3SIZ ? '2' : '1'); else printf("L3 cache disabled"); printf("\n"); break; case IBM750FX: printf("512KB L2 cache\n"); break; default: switch (mfspr(SPR_L2CR) & L2CR_L2SIZ) { case L2SIZ_256K: printf("256KB "); break; case L2SIZ_512K: printf("512KB "); break; case L2SIZ_1M: printf("1MB "); break; } printf("write-%s", (mfspr(SPR_L2CR) & L2CR_L2WT) ? "through" : "back"); if (mfspr(SPR_L2CR) & L2CR_L2PE) printf(", with parity"); printf(" backside cache\n"); break; } } else printf("L2 cache disabled\n"); } static void cpu_booke_setup(int cpuid, uint16_t vers) { #ifdef BOOKE_E500 register_t hid0; const char *bitmask; hid0 = mfspr(SPR_HID0); switch (vers) { case FSL_E500mc: bitmask = HID0_E500MC_BITMASK; cpu_idle_hook = cpu_idle_e500mc; break; case FSL_E5500: case FSL_E6500: bitmask = HID0_E5500_BITMASK; cpu_idle_hook = cpu_idle_e500mc; break; case FSL_E500v1: case FSL_E500v2: /* Only e500v1/v2 support HID0 power management setup. */ /* Program power-management mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_DOZE; mtspr(SPR_HID0, hid0); default: bitmask = HID0_E500_BITMASK; break; } printf("cpu%d: HID0 %b\n", cpuid, (int)hid0, bitmask); #endif if (cpu_idle_hook == NULL) cpu_idle_hook = cpu_idle_booke; } static void cpu_970_setup(int cpuid, uint16_t vers) { #ifdef AIM uint32_t hid0_hi, hid0_lo; __asm __volatile ("mfspr %0,%2; clrldi %1,%0,32; srdi %0,%0,32;" : "=r" (hid0_hi), "=r" (hid0_lo) : "K" (SPR_HID0)); /* Configure power-saving mode */ switch (vers) { case IBM970MP: hid0_hi |= (HID0_DEEPNAP | HID0_NAP | HID0_DPM); hid0_hi &= ~HID0_DOZE; break; default: hid0_hi |= (HID0_NAP | HID0_DPM); hid0_hi &= ~(HID0_DOZE | HID0_DEEPNAP); break; } powerpc_pow_enabled = 1; __asm __volatile (" \ sync; isync; \ sldi %0,%0,32; or %0,%0,%1; \ mtspr %2, %0; \ mfspr %0, %2; mfspr %0, %2; mfspr %0, %2; \ mfspr %0, %2; mfspr %0, %2; mfspr %0, %2; \ sync; isync" :: "r" (hid0_hi), "r"(hid0_lo), "K" (SPR_HID0)); __asm __volatile ("mfspr %0,%1; srdi %0,%0,32;" : "=r" (hid0_hi) : "K" (SPR_HID0)); printf("cpu%d: HID0 %b\n", cpuid, (int)(hid0_hi), HID0_970_BITMASK); #endif cpu_idle_hook = cpu_idle_60x; } static void cpu_powerx_setup(int cpuid, uint16_t vers) { #if defined(__powerpc64__) && defined(AIM) if ((mfmsr() & PSL_HV) == 0) return; /* Nuke the FSCR, to disable all facilities. */ mtspr(SPR_FSCR, 0); /* Configure power-saving */ switch (vers) { case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: cpu_idle_hook = cpu_idle_powerx; mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_PECE_WAKESET); isync(); break; case IBMPOWER9: cpu_idle_hook = cpu_idle_power9; mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_PECE_WAKESET); isync(); break; default: return; } #endif } static int cpu_feature_bit(SYSCTL_HANDLER_ARGS) { int result; result = (cpu_features & arg2) ? 
1 : 0; return (sysctl_handle_int(oidp, &result, 0, req)); } void cpu_idle(int busy) { sbintime_t sbt = -1; #ifdef INVARIANTS if ((mfmsr() & PSL_EE) != PSL_EE) { struct thread *td = curthread; printf("td msr %#lx\n", (u_long)td->td_md.md_saved_msr); panic("ints disabled in idleproc!"); } #endif CTR1(KTR_SPARE2, "cpu_idle(%d)", busy); if (cpu_idle_hook != NULL) { if (!busy) { critical_enter(); sbt = cpu_idleclock(); } cpu_idle_hook(sbt); if (!busy) { cpu_activeclock(); critical_exit(); } } CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy); } static void cpu_idle_60x(sbintime_t sbt) { #ifdef AIM register_t msr; uint16_t vers; #endif if (!powerpc_pow_enabled) return; #ifdef AIM msr = mfmsr(); vers = mfpvr() >> 16; switch (vers) { case IBM970: case IBM970FX: case IBM970MP: case MPC7447A: case MPC7448: case MPC7450: case MPC7455: case MPC7457: /* 0x7e00066c: dssall */ __asm __volatile("\ .long 0x7e00066c; sync; mtmsr %0; isync" :: "r"(msr | PSL_POW)); break; default: powerpc_sync(); mtmsr(msr | PSL_POW); break; } #endif } #ifdef BOOKE_E500 static void cpu_idle_e500mc(sbintime_t sbt) { /* * Base binutils doesn't know what the 'wait' instruction is, so * use the opcode encoding here. */ __asm __volatile(".long 0x7c00007c"); } #endif static void cpu_idle_booke(sbintime_t sbt) { #ifdef BOOKE_E500 register_t msr; msr = mfmsr(); powerpc_sync(); mtmsr(msr | PSL_WE); #endif } #if defined(__powerpc64__) && defined(AIM) static void cpu_idle_powerx(sbintime_t sbt) { /* Sleeping when running on one cpu gives no advantages - avoid it */ if (smp_started == 0) return; spinlock_enter(); if (sched_runnable()) { spinlock_exit(); return; } if (can_wakeup == 0) can_wakeup = 1; mb(); enter_idle_powerx(); spinlock_exit(); } static void cpu_idle_power9(sbintime_t sbt) { register_t msr; msr = mfmsr(); /* Suspend external interrupts until stop instruction completes. */ mtmsr(msr & ~PSL_EE); /* Set the stop state to lowest latency, wake up to next instruction */ /* Set maximum transition level to 2, for deepest lossless sleep. */ mtspr(SPR_PSSCR, (2 << PSSCR_MTL_S) | (0 << PSSCR_RL_S)); /* "stop" instruction (PowerISA 3.0) */ __asm __volatile (".long 0x4c0002e4"); /* * Re-enable external interrupts to capture the interrupt that caused * the wake up. */ mtmsr(msr); } #endif int cpu_idle_wakeup(int cpu) { return (0); } diff --git a/sys/powerpc/powerpc/fpu.c b/sys/powerpc/powerpc/fpu.c index 45a77235bd52..f384425b583a 100644 --- a/sys/powerpc/powerpc/fpu.c +++ b/sys/powerpc/powerpc/fpu.c @@ -1,263 +1,263 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1996 Wolfgang Solfrank. * Copyright (C) 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: fpu.c,v 1.5 2001/07/22 11:29:46 wiz Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include static void save_fpu_int(struct thread *td) { register_t msr; struct pcb *pcb; pcb = td->td_pcb; /* * Temporarily re-enable floating-point during the save */ msr = mfmsr(); if (pcb->pcb_flags & PCB_VSX) mtmsr(msr | PSL_FP | PSL_VSX); else mtmsr(msr | PSL_FP); /* * Save the floating-point registers and FPSCR to the PCB */ if (pcb->pcb_flags & PCB_VSX) { #define SFP(n) __asm ("stxvw4x " #n ", 0,%0" \ :: "b"(&pcb->pcb_fpu.fpr[n])); SFP(0); SFP(1); SFP(2); SFP(3); SFP(4); SFP(5); SFP(6); SFP(7); SFP(8); SFP(9); SFP(10); SFP(11); SFP(12); SFP(13); SFP(14); SFP(15); SFP(16); SFP(17); SFP(18); SFP(19); SFP(20); SFP(21); SFP(22); SFP(23); SFP(24); SFP(25); SFP(26); SFP(27); SFP(28); SFP(29); SFP(30); SFP(31); #undef SFP } else { #define SFP(n) __asm ("stfd " #n ", 0(%0)" \ :: "b"(&pcb->pcb_fpu.fpr[n].fpr)); SFP(0); SFP(1); SFP(2); SFP(3); SFP(4); SFP(5); SFP(6); SFP(7); SFP(8); SFP(9); SFP(10); SFP(11); SFP(12); SFP(13); SFP(14); SFP(15); SFP(16); SFP(17); SFP(18); SFP(19); SFP(20); SFP(21); SFP(22); SFP(23); SFP(24); SFP(25); SFP(26); SFP(27); SFP(28); SFP(29); SFP(30); SFP(31); #undef SFP } __asm __volatile ("mffs 0; stfd 0,0(%0)" :: "b"(&pcb->pcb_fpu.fpscr)); /* * Disable floating-point again */ isync(); mtmsr(msr); } void enable_fpu(struct thread *td) { register_t msr; struct pcb *pcb; struct trapframe *tf; pcb = td->td_pcb; tf = trapframe(td); /* * Save the thread's FPU CPU number, and set the CPU's current * FPU thread */ td->td_pcb->pcb_fpcpu = PCPU_GET(cpuid); PCPU_SET(fputhread, td); /* * Enable the FPU for when the thread returns from the exception. * If this is the first time the FPU has been used by the thread, * initialise the FPU registers and FPSCR to 0, and set the flag * to indicate that the FPU is in use. */ pcb->pcb_flags |= PCB_FPU; if (pcb->pcb_flags & PCB_VSX) tf->srr1 |= PSL_FP | PSL_VSX; else tf->srr1 |= PSL_FP; if (!(pcb->pcb_flags & PCB_FPREGS)) { memset(&pcb->pcb_fpu, 0, sizeof pcb->pcb_fpu); pcb->pcb_flags |= PCB_FPREGS; } /* * Temporarily enable floating-point so the registers * can be restored. */ msr = mfmsr(); if (pcb->pcb_flags & PCB_VSX) mtmsr(msr | PSL_FP | PSL_VSX); else mtmsr(msr | PSL_FP); /* * Load the floating point registers and FPSCR from the PCB. * (A value of 0xff for mtfsf specifies that all 8 4-bit fields * of the saved FPSCR are to be loaded from the FPU reg). 
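 *
 * The FPSCR has no direct path to or from the integer registers:
 * mffs/mtfsf move it through a floating-point register, which is why
 * both the save path above (mffs + stfd) and the restore here
 * (lfd + mtfsf) stage it in a 64-bit memory slot. A hypothetical
 * userspace read of the register, same idea in miniature (PowerPC
 * with an FPU only):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	read_fpscr(void)
 *	{
 *		union { double d; uint64_t u; } t;
 *
 *		__asm__ __volatile__ ("mffs %0" : "=f"(t.d));
 *		return (t.u);	// status bits in the low word
 *	}
 *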
*/ __asm __volatile ("lfd 0,0(%0); mtfsf 0xff,0" :: "b"(&pcb->pcb_fpu.fpscr)); if (pcb->pcb_flags & PCB_VSX) { #define LFP(n) __asm ("lxvw4x " #n ", 0,%0" \ :: "b"(&pcb->pcb_fpu.fpr[n])); LFP(0); LFP(1); LFP(2); LFP(3); LFP(4); LFP(5); LFP(6); LFP(7); LFP(8); LFP(9); LFP(10); LFP(11); LFP(12); LFP(13); LFP(14); LFP(15); LFP(16); LFP(17); LFP(18); LFP(19); LFP(20); LFP(21); LFP(22); LFP(23); LFP(24); LFP(25); LFP(26); LFP(27); LFP(28); LFP(29); LFP(30); LFP(31); #undef LFP } else { #define LFP(n) __asm ("lfd " #n ", 0(%0)" \ :: "b"(&pcb->pcb_fpu.fpr[n].fpr)); LFP(0); LFP(1); LFP(2); LFP(3); LFP(4); LFP(5); LFP(6); LFP(7); LFP(8); LFP(9); LFP(10); LFP(11); LFP(12); LFP(13); LFP(14); LFP(15); LFP(16); LFP(17); LFP(18); LFP(19); LFP(20); LFP(21); LFP(22); LFP(23); LFP(24); LFP(25); LFP(26); LFP(27); LFP(28); LFP(29); LFP(30); LFP(31); #undef LFP } isync(); mtmsr(msr); } void save_fpu(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; save_fpu_int(td); /* * Clear the current fp thread and pcb's CPU id * XXX should this be left clear to allow lazy save/restore ? */ pcb->pcb_fpcpu = INT_MAX; PCPU_SET(fputhread, NULL); } /* * Save fpu state without dropping ownership. This will only save state if * the current fpu thread is `td'. */ void save_fpu_nodrop(struct thread *td) { if (td == PCPU_GET(fputhread)) save_fpu_int(td); } /* * Clear Floating-Point Status and Control Register */ void -cleanup_fpscr() +cleanup_fpscr(void) { register_t msr; msr = mfmsr(); mtmsr(msr | PSL_FP); mtfsf(0); isync(); mtmsr(msr); } /* * Get the current fp exception */ u_int get_fpu_exception(struct thread *td) { register_t msr; u_int ucode; register_t reg; critical_enter(); msr = mfmsr(); mtmsr(msr | PSL_FP); reg = mffs(); isync(); mtmsr(msr); critical_exit(); if (reg & FPSCR_ZX) ucode = FPE_FLTDIV; else if (reg & FPSCR_OX) ucode = FPE_FLTOVF; else if (reg & FPSCR_UX) ucode = FPE_FLTUND; else if (reg & FPSCR_XX) ucode = FPE_FLTRES; else ucode = FPE_FLTINV; return ucode; } diff --git a/sys/powerpc/powerpc/platform.c b/sys/powerpc/powerpc/platform.c index 6f8f0361d11f..7993911a7c80 100644 --- a/sys/powerpc/powerpc/platform.c +++ b/sys/powerpc/powerpc/platform.c @@ -1,405 +1,405 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Peter Grehan * Copyright (c) 2009 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Dispatch platform calls to the appropriate platform implementation * through a previously registered kernel object. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" static platform_def_t *plat_def_impl; static platform_t plat_obj; static struct kobj_ops plat_kernel_kops; static struct platform_kobj plat_kernel_obj; static char plat_name[64] = ""; SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RD | CTLFLAG_TUN, plat_name, 0, "Platform currently in use"); static struct mem_affinity mem_info[VM_PHYSSEG_MAX + 1]; static int vm_locality_table[MAXMEMDOM * MAXMEMDOM]; static struct mem_region pregions[PHYS_AVAIL_SZ]; static struct numa_mem_region numa_pregions[PHYS_AVAIL_SZ]; static struct mem_region aregions[PHYS_AVAIL_SZ]; static int nnumapregions, npregions, naregions; /* * Memory region utilities: determine if two regions overlap, * and merge two overlapping regions into one */ static int memr_overlap(struct mem_region *r1, struct mem_region *r2) { if ((r1->mr_start + r1->mr_size) < r2->mr_start || (r2->mr_start + r2->mr_size) < r1->mr_start) return (FALSE); return (TRUE); } static void memr_merge(struct mem_region *from, struct mem_region *to) { vm_offset_t end; end = uqmax(to->mr_start + to->mr_size, from->mr_start + from->mr_size); to->mr_start = uqmin(from->mr_start, to->mr_start); to->mr_size = end - to->mr_start; } /* * Quick sort callout for comparing memory regions. 
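*/

/*
 * A minimal illustrative sketch (hypothetical names, not part of the
 * source): the same sort-then-merge idea as memr_overlap()/memr_merge(),
 * in self-contained userland C.  Because qsort(3) orders the regions
 * first, a single pass suffices here, whereas mem_regions() below must
 * loop until no merges remain.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdlib.h>

struct xregion { uint64_t start, size; };

static int
xregion_cmp(const void *a, const void *b)
{
	const struct xregion *ra = a, *rb = b;

	return ((ra->start > rb->start) - (ra->start < rb->start));
}

static size_t
xregion_sort_merge(struct xregion *r, size_t n)
{
	size_t i, out;

	if (n == 0)
		return (0);
	qsort(r, n, sizeof(*r), xregion_cmp);
	for (out = 0, i = 1; i < n; i++) {
		if (r[i].start <= r[out].start + r[out].size) {
			/* Overlapping or adjacent: extend the current run. */
			uint64_t end = r[i].start + r[i].size;

			if (end > r[out].start + r[out].size)
				r[out].size = end - r[out].start;
		} else
			r[++out] = r[i];
	}
	return (out + 1);	/* new region count */
}
#endif

/*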
*/ static int mr_cmp(const void *a, const void *b) { const struct mem_region *regiona, *regionb; regiona = a; regionb = b; if (regiona->mr_start < regionb->mr_start) return (-1); else if (regiona->mr_start > regionb->mr_start) return (1); else return (0); } void numa_mem_regions(struct numa_mem_region **phys, int *physsz) { struct mem_affinity *mi; int i, j, maxdom, ndomain, offset; nnumapregions = 0; PLATFORM_NUMA_MEM_REGIONS(plat_obj, numa_pregions, &nnumapregions); if (physsz != NULL) *physsz = nnumapregions; if (phys != NULL) *phys = numa_pregions; if (physsz == NULL || phys == NULL) { printf("unset value\n"); return; } maxdom = 0; for (i = 0; i < nnumapregions; i++) if (numa_pregions[i].mr_domain > maxdom) maxdom = numa_pregions[i].mr_domain; mi = mem_info; for (i = 0; i < nnumapregions; i++, mi++) { mi->start = numa_pregions[i].mr_start; mi->end = numa_pregions[i].mr_start + numa_pregions[i].mr_size; mi->domain = numa_pregions[i].mr_domain; } offset = 0; vm_locality_table[offset] = 10; ndomain = maxdom + 1; if (ndomain > 1) { for (i = 0; i < ndomain; i++) { for (j = 0; j < ndomain; j++) { /* * Not sure what these values should actually be */ if (i == j) vm_locality_table[offset] = 10; else vm_locality_table[offset] = 21; offset++; } } } vm_phys_register_domains(ndomain, mem_info, vm_locality_table); } void mem_regions(struct mem_region **phys, int *physsz, struct mem_region **avail, int *availsz) { int i, j, still_merging; if (npregions == 0) { PLATFORM_MEM_REGIONS(plat_obj, pregions, &npregions, aregions, &naregions); qsort(pregions, npregions, sizeof(*pregions), mr_cmp); qsort(aregions, naregions, sizeof(*aregions), mr_cmp); /* Remove overlapping available regions */ do { still_merging = FALSE; for (i = 0; i < naregions; i++) { if (aregions[i].mr_size == 0) continue; for (j = i+1; j < naregions; j++) { if (aregions[j].mr_size == 0) continue; if (!memr_overlap(&aregions[j], &aregions[i])) continue; memr_merge(&aregions[j], &aregions[i]); /* mark inactive */ aregions[j].mr_size = 0; still_merging = TRUE; } } } while (still_merging == TRUE); /* Collapse zero-length available regions */ for (i = 0; i < naregions; i++) { if (aregions[i].mr_size == 0) { memcpy(&aregions[i], &aregions[i+1], (naregions - i - 1)*sizeof(*aregions)); naregions--; i--; } } } if (phys != NULL) *phys = pregions; if (avail != NULL) *avail = aregions; if (physsz != NULL) *physsz = npregions; if (availsz != NULL) *availsz = naregions; } int mem_valid(vm_offset_t addr, int len) { int i; if (npregions == 0) { struct mem_region *p, *a; int na, np; mem_regions(&p, &np, &a, &na); } for (i = 0; i < npregions; i++) if ((addr >= pregions[i].mr_start) && (addr + len <= pregions[i].mr_start + pregions[i].mr_size)) return (0); return (EFAULT); } vm_offset_t platform_real_maxaddr(void) { return (PLATFORM_REAL_MAXADDR(plat_obj)); } const char * -installed_platform() +installed_platform(void) { return (plat_def_impl->name); } u_long platform_timebase_freq(struct cpuref *cpu) { return (PLATFORM_TIMEBASE_FREQ(plat_obj, cpu)); } /* * Put the current CPU, as last step in suspend, to sleep */ void -platform_sleep() +platform_sleep(void) { PLATFORM_SLEEP(plat_obj); } int platform_smp_first_cpu(struct cpuref *cpu) { return (PLATFORM_SMP_FIRST_CPU(plat_obj, cpu)); } int platform_smp_next_cpu(struct cpuref *cpu) { return (PLATFORM_SMP_NEXT_CPU(plat_obj, cpu)); } int platform_smp_get_bsp(struct cpuref *cpu) { return (PLATFORM_SMP_GET_BSP(plat_obj, cpu)); } int platform_smp_start_cpu(struct pcpu *cpu) { return 
(PLATFORM_SMP_START_CPU(plat_obj, cpu)); } void -platform_smp_ap_init() +platform_smp_ap_init(void) { PLATFORM_SMP_AP_INIT(plat_obj); } void platform_smp_probe_threads(void) { PLATFORM_SMP_PROBE_THREADS(plat_obj); } #ifdef SMP struct cpu_group * cpu_topo(void) { return (PLATFORM_SMP_TOPO(plat_obj)); } #endif int platform_node_numa_domain(phandle_t node) { return (PLATFORM_NODE_NUMA_DOMAIN(plat_obj, node)); } /* * Reset back to firmware. */ void -cpu_reset() +cpu_reset(void) { PLATFORM_RESET(plat_obj); } void platform_smp_timebase_sync(u_long tb, int ap) { PLATFORM_SMP_TIMEBASE_SYNC(plat_obj, tb, ap); } /* * Platform install routines. Highest priority wins, using the same * algorithm as bus attachment. */ SET_DECLARE(platform_set, platform_def_t); void -platform_probe_and_attach() +platform_probe_and_attach(void) { platform_def_t **platpp, *platp; int prio, best_prio; plat_obj = &plat_kernel_obj; best_prio = 0; /* * Try to locate the best platform kobj */ SET_FOREACH(platpp, platform_set) { platp = *platpp; /* * Take care of compiling the selected class, and * then statically initialise the MMU object */ kobj_class_compile_static(platp, &plat_kernel_kops); kobj_init_static((kobj_t)plat_obj, platp); prio = PLATFORM_PROBE(plat_obj); /* Check for errors */ if (prio > 0) continue; /* * Check if this module was specifically requested through * the loader tunable we provide. */ if (strcmp(platp->name,plat_name) == 0) { plat_def_impl = platp; break; } /* Otherwise, see if it is better than our current best */ if (plat_def_impl == NULL || prio > best_prio) { best_prio = prio; plat_def_impl = platp; } /* * We can't free the KOBJ, since it is static. Reset the ops * member of this class so that we can come back later. */ platp->ops = NULL; } if (plat_def_impl == NULL) panic("No platform module found!"); /* * Recompile to make sure we ended with the * correct one, and then attach. */ kobj_class_compile_static(plat_def_impl, &plat_kernel_kops); kobj_init_static((kobj_t)plat_obj, plat_def_impl); strlcpy(plat_name,plat_def_impl->name,sizeof(plat_name)); PLATFORM_ATTACH(plat_obj); } diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index a0caddb9e012..42ff7a6de304 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -1,257 +1,257 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Peter Grehan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Dispatch MI pmap calls to the appropriate MMU implementation * through a previously registered kernel object. * * Before pmap_bootstrap() can be called, a CPU module must have * called pmap_mmu_install(). This may be called multiple times: * the highest priority call will be installed as the default * MMU handler when pmap_bootstrap() is called. * * It is required that mutex_init() be called before pmap_bootstrap(), * as the PMAP layer makes extensive use of mutexes. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include mmu_t mmu_obj; /* * pmap globals */ struct pmap kernel_pmap_store; vm_offset_t msgbuf_phys; vm_offset_t kernel_vm_end; vm_offset_t virtual_avail; vm_offset_t virtual_end; caddr_t crashdumpmap; int pmap_bootstrapped; /* Default level 0 reservations consist of 512 pages (2MB superpage). */ int vm_level_0_order = 9; SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); int superpages_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled, CTLFLAG_RDTUN, &superpages_enabled, 0, "Enable support for transparent superpages"); #ifdef AIM int pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b) { if (PVO_VADDR(a) < PVO_VADDR(b)) return (-1); else if (PVO_VADDR(a) > PVO_VADDR(b)) return (1); return (0); } RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare); #endif static int pmap_nomethod(void) { return (0); } #define DEFINE_PMAP_IFUNC(ret, func, args) \ DEFINE_IFUNC(, ret, pmap_##func, args) { \ pmap_##func##_t f; \ f = PMAP_RESOLVE_FUNC(func); \ return (f != NULL ? f : (pmap_##func##_t)pmap_nomethod);\ } #define DEFINE_DUMPSYS_IFUNC(ret, func, args) \ DEFINE_IFUNC(, ret, dumpsys_##func, args) { \ pmap_dumpsys_##func##_t f; \ f = PMAP_RESOLVE_FUNC(dumpsys_##func); \ return (f != NULL ? 
f : (pmap_dumpsys_##func##_t)pmap_nomethod);\ } DEFINE_PMAP_IFUNC(void, activate, (struct thread *)); DEFINE_PMAP_IFUNC(void, advise, (pmap_t, vm_offset_t, vm_offset_t, int)); DEFINE_PMAP_IFUNC(void, align_superpage, (vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t)); DEFINE_PMAP_IFUNC(void, clear_modify, (vm_page_t)); DEFINE_PMAP_IFUNC(void, copy, (pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t)); DEFINE_PMAP_IFUNC(int, enter, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t)); DEFINE_PMAP_IFUNC(void, enter_quick, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t)); DEFINE_PMAP_IFUNC(void, enter_object, (pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t)); DEFINE_PMAP_IFUNC(vm_paddr_t, extract, (pmap_t, vm_offset_t)); DEFINE_PMAP_IFUNC(vm_page_t, extract_and_hold, (pmap_t, vm_offset_t, vm_prot_t)); DEFINE_PMAP_IFUNC(void, kenter, (vm_offset_t, vm_paddr_t)); DEFINE_PMAP_IFUNC(void, kenter_attr, (vm_offset_t, vm_paddr_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(vm_paddr_t, kextract, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t)); DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t)); DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t)); DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t)); DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t)); DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t)); DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t)); DEFINE_PMAP_IFUNC(void, qenter, (vm_offset_t, vm_page_t *, int)); DEFINE_PMAP_IFUNC(void, qremove, (vm_offset_t, int)); DEFINE_PMAP_IFUNC(vm_offset_t, quick_enter_page, (vm_page_t)); DEFINE_PMAP_IFUNC(void, quick_remove_page, (vm_offset_t)); DEFINE_PMAP_IFUNC(boolean_t, ts_referenced, (vm_page_t)); DEFINE_PMAP_IFUNC(void, release, (pmap_t)); DEFINE_PMAP_IFUNC(void, remove, (pmap_t, vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, remove_all, (vm_page_t)); DEFINE_PMAP_IFUNC(void, remove_pages, (pmap_t)); DEFINE_PMAP_IFUNC(void, remove_write, (vm_page_t)); DEFINE_PMAP_IFUNC(void, unwire, (pmap_t, vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, zero_page, (vm_page_t)); DEFINE_PMAP_IFUNC(void, zero_page_area, (vm_page_t, int, int)); DEFINE_PMAP_IFUNC(void, copy_page, (vm_page_t, vm_page_t)); DEFINE_PMAP_IFUNC(void, copy_pages, (vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize)); DEFINE_PMAP_IFUNC(void, growkernel, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, init, (void)); DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int)); DEFINE_PMAP_IFUNC(int, pinit, (pmap_t)); DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t)); DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *)); DEFINE_PMAP_IFUNC(void, deactivate, (struct thread *)); DEFINE_PMAP_IFUNC(void, bootstrap, (vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, cpu_bootstrap, (int)); DEFINE_PMAP_IFUNC(void *, mapdev, (vm_paddr_t, vm_size_t)); DEFINE_PMAP_IFUNC(void *, mapdev_attr, (vm_paddr_t, vm_size_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, page_set_memattr, (vm_page_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, unmapdev, (void *, vm_size_t)); DEFINE_PMAP_IFUNC(int, map_user_ptr, (pmap_t, volatile const void *, void **, size_t, size_t *)); DEFINE_PMAP_IFUNC(int, decode_kernel_ptr, 
(vm_offset_t, int *, vm_offset_t *)); DEFINE_PMAP_IFUNC(boolean_t, dev_direct_mapped, (vm_paddr_t, vm_size_t)); DEFINE_PMAP_IFUNC(void, sync_icache, (pmap_t, vm_offset_t, vm_size_t)); DEFINE_PMAP_IFUNC(int, change_attr, (vm_offset_t, vm_size_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, page_array_startup, (long)); DEFINE_PMAP_IFUNC(void, tlbie_all, (void)); DEFINE_DUMPSYS_IFUNC(void, map_chunk, (vm_paddr_t, size_t, void **)); DEFINE_DUMPSYS_IFUNC(void, unmap_chunk, (vm_paddr_t, size_t, void *)); DEFINE_DUMPSYS_IFUNC(void, pa_init, (void)); DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (struct bitset *)); DEFINE_DUMPSYS_IFUNC(void *, dump_pmap_init, (unsigned)); DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *)); /* * MMU install routines. Highest priority wins, equal priority also * overrides allowing last-set to win. */ SET_DECLARE(mmu_set, struct mmu_kobj); boolean_t pmap_mmu_install(char *name, int prio) { mmu_t *mmupp, mmup; static int curr_prio = 0; /* * Try and locate the MMU kobj corresponding to the name */ SET_FOREACH(mmupp, mmu_set) { mmup = *mmupp; if (mmup->name && !strcmp(mmup->name, name) && (prio >= curr_prio || mmu_obj == NULL)) { curr_prio = prio; mmu_obj = mmup; return (TRUE); } } return (FALSE); } /* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */ void -pmap_mmu_init() +pmap_mmu_init(void) { if (mmu_obj->funcs->install != NULL) (mmu_obj->funcs->install)(); } const char * pmap_mmu_name(void) { return (mmu_obj->name); } int unmapped_buf_allowed; boolean_t pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { switch (mode) { case VM_MEMATTR_DEFAULT: case VM_MEMATTR_UNCACHEABLE: case VM_MEMATTR_CACHEABLE: case VM_MEMATTR_WRITE_COMBINING: case VM_MEMATTR_WRITE_BACK: case VM_MEMATTR_WRITE_THROUGH: case VM_MEMATTR_PREFETCHABLE: return (TRUE); default: return (FALSE); } } diff --git a/sys/powerpc/ps3/mmu_ps3.c b/sys/powerpc/ps3/mmu_ps3.c index c21faccf631c..b60e6775be30 100644 --- a/sys/powerpc/ps3/mmu_ps3.c +++ b/sys/powerpc/ps3/mmu_ps3.c @@ -1,291 +1,291 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2010 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
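*/

/*
 * A minimal illustrative sketch (hypothetical names): the installation
 * rule used by pmap_mmu_install() above: highest priority wins, and an
 * equal-priority later call is allowed to take over.
 */
#if 0	/* example only */
#include <stddef.h>

struct ximpl { const char *name; };

static struct ximpl *cur_impl;
static int cur_prio;

static int
ximpl_install(struct ximpl *imp, int prio)
{
	/* ">=" rather than ">" lets last-set win on a priority tie. */
	if (cur_impl == NULL || prio >= cur_prio) {
		cur_impl = imp;
		cur_prio = prio;
		return (1);
	}
	return (0);
}
#endif

/*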
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ps3-hvcall.h" #define VSID_HASH_MASK 0x0000007fffffffffUL #define PTESYNC() __asm __volatile("ptesync") extern int ps3fb_remap(void); static uint64_t mps3_vas_id; /* * Kernel MMU interface */ static void mps3_install(void); static void mps3_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend); static void mps3_cpu_bootstrap(int ap); static int64_t mps3_pte_synch(struct pvo_entry *); static int64_t mps3_pte_clear(struct pvo_entry *, uint64_t ptebit); static int64_t mps3_pte_unset(struct pvo_entry *); static int64_t mps3_pte_insert(struct pvo_entry *); static struct pmap_funcs mps3_methods = { .install = mps3_install, .bootstrap = mps3_bootstrap, .cpu_bootstrap = mps3_cpu_bootstrap, }; static struct moea64_funcs mps3_funcs = { .pte_synch = mps3_pte_synch, .pte_clear = mps3_pte_clear, .pte_unset = mps3_pte_unset, .pte_insert = mps3_pte_insert, }; MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, oea64_mmu); static struct mtx mps3_table_lock; static void -mps3_install() +mps3_install(void) { moea64_ops = &mps3_funcs; moea64_install(); } static void mps3_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { uint64_t final_pteg_count; mtx_init(&mps3_table_lock, "page table", NULL, MTX_DEF); moea64_early_bootstrap(kernelstart, kernelend); /* In case we had a page table already */ lv1_destruct_virtual_address_space(0); /* Allocate new hardware page table */ lv1_construct_virtual_address_space( 20 /* log_2(moea64_pteg_count) */, 2 /* n page sizes */, (24UL << 56) | (16UL << 48) /* page sizes 16 MB + 64 KB */, &mps3_vas_id, &final_pteg_count ); lv1_select_virtual_address_space(mps3_vas_id); moea64_pteg_count = final_pteg_count / sizeof(struct lpteg); moea64_mid_bootstrap(kernelstart, kernelend); moea64_late_bootstrap(kernelstart, kernelend); } static void mps3_cpu_bootstrap(int ap) { struct slb *slb = PCPU_GET(aim.slb); register_t seg0; int i; mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); /* * Select the page table we configured above and set up the FB mapping * so we can have a console. 
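*/

/*
 * A speculative illustrative sketch: the page-size argument to
 * lv1_construct_virtual_address_space() above appears to pack log2 page
 * sizes byte-wise from the top of the word, so (24UL << 56) | (16UL << 48)
 * requests 2^24 = 16 MB and 2^16 = 64 KB pages.  A hypothetical decoder:
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

static void
decode_page_sizes(uint64_t enc, unsigned npgsizes)
{
	unsigned i, log2sz;

	for (i = 0; i < npgsizes; i++) {
		log2sz = (enc >> (56 - 8 * i)) & 0xff;
		printf("page size %u: %llu bytes\n", i,
		    (unsigned long long)1 << log2sz);
	}
}
/* decode_page_sizes((24UL << 56) | (16UL << 48), 2) prints 16 MB, 64 KB */
#endif

/*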
*/ lv1_select_virtual_address_space(mps3_vas_id); if (!ap) ps3fb_remap(); /* * Install kernel SLB entries */ __asm __volatile ("slbia"); __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0)); for (i = 0; i < 64; i++) { if (!(slb[i].slbe & SLBE_VALID)) continue; __asm __volatile ("slbmte %0, %1" :: "r"(slb[i].slbv), "r"(slb[i].slbe)); } } static int64_t mps3_pte_synch_locked(struct pvo_entry *pvo) { uint64_t halfbucket[4], rcbits; PTESYNC(); lv1_read_htab_entries(mps3_vas_id, pvo->pvo_pte.slot & ~0x3UL, &halfbucket[0], &halfbucket[1], &halfbucket[2], &halfbucket[3], &rcbits); /* Check if present in page table */ if ((halfbucket[pvo->pvo_pte.slot & 0x3] & LPTE_AVPN_MASK) != ((pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) & LPTE_AVPN_MASK)) return (-1); if (!(halfbucket[pvo->pvo_pte.slot & 0x3] & LPTE_VALID)) return (-1); /* * rcbits contains the low 12 bits of each PTE's 2nd part, * spaced at 16-bit intervals */ return ((rcbits >> ((3 - (pvo->pvo_pte.slot & 0x3))*16)) & (LPTE_CHG | LPTE_REF)); } static int64_t mps3_pte_synch(struct pvo_entry *pvo) { int64_t retval; mtx_lock(&mps3_table_lock); retval = mps3_pte_synch_locked(pvo); mtx_unlock(&mps3_table_lock); return (retval); } static int64_t mps3_pte_clear(struct pvo_entry *pvo, uint64_t ptebit) { int64_t refchg; struct lpte pte; mtx_lock(&mps3_table_lock); refchg = mps3_pte_synch_locked(pvo); if (refchg < 0) { mtx_unlock(&mps3_table_lock); return (refchg); } moea64_pte_from_pvo(pvo, &pte); pte.pte_lo |= refchg; pte.pte_lo &= ~ptebit; /* XXX: race on RC bits between write and sync. Anything to do? */ lv1_write_htab_entry(mps3_vas_id, pvo->pvo_pte.slot, pte.pte_hi, pte.pte_lo); mtx_unlock(&mps3_table_lock); return (refchg); } static int64_t mps3_pte_unset(struct pvo_entry *pvo) { int64_t refchg; mtx_lock(&mps3_table_lock); refchg = mps3_pte_synch_locked(pvo); if (refchg < 0) { STAT_MOEA64(moea64_pte_overflow--); mtx_unlock(&mps3_table_lock); return (-1); } /* XXX: race on RC bits between unset and sync. Anything to do? */ lv1_write_htab_entry(mps3_vas_id, pvo->pvo_pte.slot, 0, 0); mtx_unlock(&mps3_table_lock); STAT_MOEA64(moea64_pte_valid--); return (refchg & (LPTE_REF | LPTE_CHG)); } static int64_t mps3_pte_insert(struct pvo_entry *pvo) { int result; struct lpte pte, evicted; uint64_t index; if (pvo->pvo_vaddr & PVO_HID) { /* Hypercall needs primary PTEG */ pvo->pvo_vaddr &= ~PVO_HID; pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); } pvo->pvo_pte.slot &= ~7UL; moea64_pte_from_pvo(pvo, &pte); evicted.pte_hi = 0; PTESYNC(); mtx_lock(&mps3_table_lock); result = lv1_insert_htab_entry(mps3_vas_id, pvo->pvo_pte.slot, pte.pte_hi, pte.pte_lo, LPTE_LOCKED | LPTE_WIRED, 0, &index, &evicted.pte_hi, &evicted.pte_lo); mtx_unlock(&mps3_table_lock); if (result != 0) { /* No freeable slots in either PTEG? We're hosed. */ panic("mps3_pte_insert: overflow (%d)", result); return (-1); } /* * See where we ended up. 
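*/

/*
 * A minimal illustrative sketch: per the comment in mps3_pte_synch_locked()
 * above, the hypervisor returns the R/C halves of all four PTEs of a
 * half-bucket packed into one 64-bit word, 16 bits per slot with slot 0 in
 * the most significant position.  A hypothetical unpacker:
 */
#if 0	/* example only */
#include <stdint.h>

static uint16_t
rcbits_for_slot(uint64_t rcbits, unsigned slot)
{
	/* Keep only this slot's low 12 bits (where LPTE_CHG/LPTE_REF live). */
	return ((rcbits >> ((3 - (slot & 3)) * 16)) & 0xfff);
}
#endif

/*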
*/ if ((index & ~7UL) != pvo->pvo_pte.slot) pvo->pvo_vaddr |= PVO_HID; pvo->pvo_pte.slot = index; STAT_MOEA64(moea64_pte_valid++); if (evicted.pte_hi) { KASSERT((evicted.pte_hi & (LPTE_WIRED | LPTE_LOCKED)) == 0, ("Evicted a wired PTE")); STAT_MOEA64(moea64_pte_valid--); STAT_MOEA64(moea64_pte_overflow++); } return (0); } diff --git a/sys/powerpc/pseries/mmu_phyp.c b/sys/powerpc/pseries/mmu_phyp.c index 67d0b5821f4f..3e32b3cb8a83 100644 --- a/sys/powerpc/pseries/mmu_phyp.c +++ b/sys/powerpc/pseries/mmu_phyp.c @@ -1,671 +1,671 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2010 Andreas Tobler * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "phyp-hvcall.h" #define MMU_PHYP_DEBUG 0 #define MMU_PHYP_ID "mmu_phyp: " #if MMU_PHYP_DEBUG #define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__) #define dprintf0(fmt, ...) dprintf(MMU_PHYP_ID fmt, ## __VA_ARGS__) #else #define dprintf(fmt, args...) do { ; } while(0) #define dprintf0(fmt, args...) 
do { ; } while(0) #endif static struct rmlock mphyp_eviction_lock; /* * Kernel MMU interface */ static void mphyp_install(void); static void mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend); static void mphyp_cpu_bootstrap(int ap); static void *mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes); static int64_t mphyp_pte_synch(struct pvo_entry *pvo); static int64_t mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit); static int64_t mphyp_pte_unset(struct pvo_entry *pvo); static int64_t mphyp_pte_insert(struct pvo_entry *pvo); static int64_t mphyp_pte_unset_sp(struct pvo_entry *pvo); static int64_t mphyp_pte_insert_sp(struct pvo_entry *pvo); static int64_t mphyp_pte_replace_sp(struct pvo_entry *pvo); static struct pmap_funcs mphyp_methods = { .install = mphyp_install, .bootstrap = mphyp_bootstrap, .cpu_bootstrap = mphyp_cpu_bootstrap, .dumpsys_dump_pmap = mphyp_dump_pmap, }; static struct moea64_funcs mmu_phyp_funcs = { .pte_synch = mphyp_pte_synch, .pte_clear = mphyp_pte_clear, .pte_unset = mphyp_pte_unset, .pte_insert = mphyp_pte_insert, .pte_unset_sp = mphyp_pte_unset_sp, .pte_insert_sp = mphyp_pte_insert_sp, .pte_replace_sp = mphyp_pte_replace_sp, }; MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, oea64_mmu); static int brokenkvm = 0; static uint64_t final_pteg_count = 0; static void print_kvm_bug_warning(void *data) { if (brokenkvm) printf("WARNING: Running on a broken hypervisor that does " "not support mandatory H_CLEAR_MOD and H_CLEAR_REF " "hypercalls. Performance will be suboptimal.\n"); } SYSINIT(kvmbugwarn1, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1, print_kvm_bug_warning, NULL); SYSINIT(kvmbugwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_kvm_bug_warning, NULL); static void -mphyp_install() +mphyp_install(void) { char buf[8]; uint32_t prop[2]; uint32_t nptlp, shift = 0, slb_encoding = 0; uint32_t lp_size, lp_encoding; phandle_t dev, node, root; int idx, len, res; bool has_lp; root = OF_peer(0); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } node = OF_child(dev); while (node != 0) { res = OF_getprop(node, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; node = OF_peer(node); } res = OF_getencprop(node, "ibm,pft-size", prop, sizeof(prop)); if (res <= 0) panic("mmu_phyp: unknown PFT size"); final_pteg_count = 1 << prop[1]; res = OF_getencprop(node, "ibm,slb-size", prop, sizeof(prop[0])); if (res > 0) n_slbs = prop[0]; dprintf0("slb-size=%i\n", n_slbs); /* * Scan the large page size property for PAPR compatible machines. * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties' * for the encoding of the property. */ len = OF_getproplen(node, "ibm,segment-page-sizes"); if (len > 0) { /* * We have to use a variable length array on the stack * since we have very limited stack space. */ pcell_t arr[len/sizeof(cell_t)]; res = OF_getencprop(node, "ibm,segment-page-sizes", arr, sizeof(arr)); len /= 4; idx = 0; has_lp = false; while (len > 0) { shift = arr[idx]; slb_encoding = arr[idx + 1]; nptlp = arr[idx + 2]; dprintf0("Segment Page Size: " "%uKB, slb_enc=0x%X: {size, encoding}[%u] =", shift > 10? 1 << (shift-10) : 0, slb_encoding, nptlp); idx += 3; len -= 3; while (len > 0 && nptlp) { lp_size = arr[idx]; lp_encoding = arr[idx+1]; dprintf(" {%uKB, 0x%X}", lp_size > 10? 
1 << (lp_size-10) : 0, lp_encoding); if (slb_encoding == SLBV_L && lp_encoding == 0) has_lp = true; if (slb_encoding == SLB_PGSZ_4K_4K && lp_encoding == LP_4K_16M) moea64_has_lp_4k_16m = true; idx += 2; len -= 2; nptlp--; } dprintf("\n"); if (has_lp && moea64_has_lp_4k_16m) break; } if (has_lp) { moea64_large_page_shift = shift; moea64_large_page_size = 1ULL << lp_size; moea64_large_page_mask = moea64_large_page_size - 1; hw_direct_map = 1; printf(MMU_PHYP_ID "Support for hugepages of %uKB detected\n", moea64_large_page_shift > 10? 1 << (moea64_large_page_shift-10) : 0); } else { moea64_large_page_size = 0; moea64_large_page_shift = 0; moea64_large_page_mask = 0; hw_direct_map = 0; printf(MMU_PHYP_ID "Support for hugepages not found\n"); } } moea64_ops = &mmu_phyp_funcs; moea64_install(); } static void mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { struct lpte old; uint64_t vsid; int idx; rm_init(&mphyp_eviction_lock, "pte eviction"); moea64_early_bootstrap(kernelstart, kernelend); moea64_pteg_count = final_pteg_count / sizeof(struct lpteg); /* Clear any old page table entries */ for (idx = 0; idx < moea64_pteg_count*8; idx++) { phyp_pft_hcall(H_READ, 0, idx, 0, 0, &old.pte_hi, &old.pte_lo, &old.pte_lo); vsid = (old.pte_hi << (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) >> 28; if (vsid == VSID_VRMA || vsid == 0 /* Older VRMA */) continue; if (old.pte_hi & LPTE_VALID) phyp_hcall(H_REMOVE, 0, idx, 0); } moea64_mid_bootstrap(kernelstart, kernelend); moea64_late_bootstrap(kernelstart, kernelend); /* Test for broken versions of KVM that don't conform to the spec */ if (phyp_hcall(H_CLEAR_MOD, 0, 0) == H_FUNCTION) brokenkvm = 1; } static void mphyp_cpu_bootstrap(int ap) { struct slb *slb = PCPU_GET(aim.slb); register_t seg0; int i; /* * Install kernel SLB entries */ __asm __volatile ("slbia"); __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0)); for (i = 0; i < 64; i++) { if (!(slb[i].slbe & SLBE_VALID)) continue; __asm __volatile ("slbmte %0, %1" :: "r"(slb[i].slbv), "r"(slb[i].slbe)); } } static int64_t mphyp_pte_synch(struct pvo_entry *pvo) { struct lpte pte; uint64_t junk; __asm __volatile("ptesync"); phyp_pft_hcall(H_READ, 0, pvo->pvo_pte.slot, 0, 0, &pte.pte_hi, &pte.pte_lo, &junk); if ((pte.pte_hi & LPTE_AVPN_MASK) != ((pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) & LPTE_AVPN_MASK)) return (-1); if (!(pte.pte_hi & LPTE_VALID)) return (-1); return (pte.pte_lo & (LPTE_CHG | LPTE_REF)); } static int64_t mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit) { struct rm_priotracker track; int64_t refchg; uint64_t ptelo, junk; int err __diagused; /* * This involves two steps (synch and clear) so we need the entry * not to change in the middle. We are protected against deliberate * unset by virtue of holding the pmap lock. Protection against * incidental unset (page table eviction) comes from holding the * shared eviction lock. */ PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); rm_rlock(&mphyp_eviction_lock, &track); refchg = mphyp_pte_synch(pvo); if (refchg < 0) { rm_runlock(&mphyp_eviction_lock, &track); return (refchg); } if (brokenkvm) { /* * No way to clear either bit, which is total madness. * Pessimistically claim that, once modified, it stays so * forever and that it is never referenced. 
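*/

/*
 * A minimal illustrative sketch (bit values assumed for illustration):
 * without H_CLEAR_MOD/H_CLEAR_REF the R/C bits can be read but never
 * cleared, so the pessimistic contract is "modified stays modified,
 * referenced is never reported", i.e. mask off REF and pass CHG through.
 */
#if 0	/* example only */
#include <stdint.h>

#define	XLPTE_REF	0x100ULL	/* assumed stand-ins, not moea64's */
#define	XLPTE_CHG	0x080ULL

static uint64_t
broken_hv_refchg(uint64_t refchg)
{
	return (refchg & ~XLPTE_REF);	/* CHG survives, REF never set */
}
#endif

/*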
*/ rm_runlock(&mphyp_eviction_lock, &track); return (refchg & ~LPTE_REF); } if (ptebit & LPTE_CHG) { err = phyp_pft_hcall(H_CLEAR_MOD, 0, pvo->pvo_pte.slot, 0, 0, &ptelo, &junk, &junk); KASSERT(err == H_SUCCESS, ("Error clearing page change bit: %d", err)); refchg |= (ptelo & LPTE_CHG); } if (ptebit & LPTE_REF) { err = phyp_pft_hcall(H_CLEAR_REF, 0, pvo->pvo_pte.slot, 0, 0, &ptelo, &junk, &junk); KASSERT(err == H_SUCCESS, ("Error clearing page reference bit: %d", err)); refchg |= (ptelo & LPTE_REF); } rm_runlock(&mphyp_eviction_lock, &track); return (refchg); } static int64_t mphyp_pte_unset(struct pvo_entry *pvo) { struct lpte pte; uint64_t junk; int err; PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); moea64_pte_from_pvo(pvo, &pte); err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot, pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo, &junk); KASSERT(err == H_SUCCESS || err == H_NOT_FOUND, ("Error removing page: %d", err)); if (err == H_NOT_FOUND) { STAT_MOEA64(moea64_pte_overflow--); return (-1); } return (pte.pte_lo & (LPTE_REF | LPTE_CHG)); } static uintptr_t mphyp_pte_spillable_ident(uintptr_t ptegbase, struct lpte *to_evict) { uint64_t slot, junk, k; struct lpte pt; int i, j; /* Start at a random slot */ i = mftb() % 8; k = -1; for (j = 0; j < 8; j++) { slot = ptegbase + (i + j) % 8; phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pt.pte_hi, &pt.pte_lo, &junk); if ((pt.pte_hi & (LPTE_WIRED | LPTE_BIG)) != 0) continue; /* This is a candidate, so remember it */ k = slot; /* Try to get a page that has not been used lately */ if (!(pt.pte_hi & LPTE_VALID) || !(pt.pte_lo & LPTE_REF)) { memcpy(to_evict, &pt, sizeof(struct lpte)); return (k); } } if (k == -1) return (k); phyp_pft_hcall(H_READ, 0, k, 0, 0, &to_evict->pte_hi, &to_evict->pte_lo, &junk); return (k); } static __inline int64_t mphyp_pte_insert_locked(struct pvo_entry *pvo, struct lpte *pte) { struct lpte evicted; uint64_t index, junk; int64_t result; /* * First try primary hash. */ pvo->pvo_pte.slot &= ~7UL; /* Base slot address */ result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot, pte->pte_hi, pte->pte_lo, &index, &evicted.pte_lo, &junk); if (result == H_SUCCESS) { pvo->pvo_pte.slot = index; return (0); } KASSERT(result == H_PTEG_FULL, ("Page insertion error: %ld " "(ptegidx: %#zx/%#lx, PTE %#lx/%#lx", result, pvo->pvo_pte.slot, moea64_pteg_count, pte->pte_hi, pte->pte_lo)); /* * Next try secondary hash. */ pvo->pvo_vaddr ^= PVO_HID; pte->pte_hi ^= LPTE_HID; pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot, pte->pte_hi, pte->pte_lo, &index, &evicted.pte_lo, &junk); if (result == H_SUCCESS) { pvo->pvo_pte.slot = index; return (0); } KASSERT(result == H_PTEG_FULL, ("Secondary page insertion error: %ld", result)); return (-1); } static __inline int64_t mphyp_pte_evict_and_insert_locked(struct pvo_entry *pvo, struct lpte *pte) { struct lpte evicted; uint64_t index, junk, lastptelo; int64_t result; evicted.pte_hi = 0; index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted); if (index == -1L) { /* Try other hash table? */ pvo->pvo_vaddr ^= PVO_HID; pte->pte_hi ^= LPTE_HID; pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted); } if (index == -1L) { /* No freeable slots in either PTEG? We're hosed. 
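*/

/*
 * A minimal illustrative sketch of the victim policy in
 * mphyp_pte_spillable_ident() above: scan the eight PTEG slots from a
 * pseudo-random offset, never evict wired or large-page entries, and
 * prefer a slot that is invalid or not recently referenced.  Flag bits
 * here are hypothetical stand-ins.
 */
#if 0	/* example only */
#include <stdint.h>

#define	XPTE_VALID	0x1u
#define	XPTE_WIRED	0x2u	/* also covers LPTE_BIG in the real code */
#define	XPTE_REF	0x4u

static int
pick_victim(const uint8_t flags[8], unsigned start)
{
	int candidate = -1;
	unsigned i, slot;

	for (i = 0; i < 8; i++) {
		slot = (start + i) % 8;
		if (flags[slot] & XPTE_WIRED)
			continue;
		candidate = slot;	/* evictable; remember it */
		/* Best case: empty, or valid but not referenced lately. */
		if (!(flags[slot] & XPTE_VALID) || !(flags[slot] & XPTE_REF))
			return (slot);
	}
	return (candidate);	/* -1 when every slot is pinned */
}
#endif

/*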
*/ rm_wunlock(&mphyp_eviction_lock); panic("mphyp_pte_insert: overflow"); return (-1); } /* Victim acquired: update page before waving goodbye */ if (evicted.pte_hi & LPTE_VALID) { result = phyp_pft_hcall(H_REMOVE, H_AVPN, index, evicted.pte_hi & LPTE_AVPN_MASK, 0, &junk, &lastptelo, &junk); STAT_MOEA64(moea64_pte_overflow++); KASSERT(result == H_SUCCESS || result == H_NOT_FOUND, ("Error evicting page: %d", (int)result)); } /* * Set the new PTE. */ result = phyp_pft_hcall(H_ENTER, H_EXACT, index, pte->pte_hi, pte->pte_lo, &index, &evicted.pte_lo, &junk); pvo->pvo_pte.slot = index; if (result == H_SUCCESS) return (0); rm_wunlock(&mphyp_eviction_lock); panic("Page replacement error: %ld", result); return (result); } static int64_t mphyp_pte_insert(struct pvo_entry *pvo) { struct rm_priotracker track; int64_t ret; struct lpte pte; PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); /* Initialize PTE */ moea64_pte_from_pvo(pvo, &pte); /* Make sure further insertion is locked out during evictions */ rm_rlock(&mphyp_eviction_lock, &track); ret = mphyp_pte_insert_locked(pvo, &pte); rm_runlock(&mphyp_eviction_lock, &track); if (ret == -1) { /* * Out of luck. Find a PTE to sacrifice. */ /* Lock out all insertions for a bit */ rm_wlock(&mphyp_eviction_lock); ret = mphyp_pte_evict_and_insert_locked(pvo, &pte); rm_wunlock(&mphyp_eviction_lock); /* All clear */ } return (ret); } static void * mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes) { struct dump_context *dctx; struct lpte p, *pbuf; int bufidx; uint64_t junk; u_long ptex, ptex_end; dctx = (struct dump_context *)ctx; pbuf = (struct lpte *)buf; bufidx = 0; ptex = dctx->ptex; ptex_end = ptex + dctx->blksz / sizeof(struct lpte); ptex_end = MIN(ptex_end, dctx->ptex_end); *nbytes = (ptex_end - ptex) * sizeof(struct lpte); if (*nbytes == 0) return (NULL); for (; ptex < ptex_end; ptex++) { phyp_pft_hcall(H_READ, 0, ptex, 0, 0, &p.pte_hi, &p.pte_lo, &junk); pbuf[bufidx++] = p; } dctx->ptex = ptex; return (buf); } static int64_t mphyp_pte_unset_sp(struct pvo_entry *pvo) { struct lpte pte; uint64_t junk, refchg; int err; vm_offset_t eva; pmap_t pm __diagused; pm = pvo->pvo_pmap; PMAP_LOCK_ASSERT(pm, MA_OWNED); KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo))); refchg = 0; eva = PVO_VADDR(pvo) + HPT_SP_SIZE; for (; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { moea64_pte_from_pvo(pvo, &pte); err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot, pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo, &junk); KASSERT(err == H_SUCCESS || err == H_NOT_FOUND, ("Error removing page: %d", err)); if (err == H_NOT_FOUND) STAT_MOEA64(moea64_pte_overflow--); refchg |= pte.pte_lo & (LPTE_REF | LPTE_CHG); } return (refchg); } static int64_t mphyp_pte_insert_sp(struct pvo_entry *pvo) { struct rm_priotracker track; int64_t ret; struct lpte pte; vm_offset_t eva; pmap_t pm __diagused; pm = pvo->pvo_pmap; PMAP_LOCK_ASSERT(pm, MA_OWNED); KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo))); eva = PVO_VADDR(pvo) + HPT_SP_SIZE; /* Make sure further insertion is locked out during evictions */ rm_rlock(&mphyp_eviction_lock, &track); for (; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { /* Initialize PTE */ moea64_pte_from_pvo(pvo, &pte); ret = mphyp_pte_insert_locked(pvo, &pte); if (ret == -1) { /* * Out of luck. Find a PTE to sacrifice. 
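*/

/*
 * A minimal illustrative sketch of the locking pattern used here and in
 * mphyp_pte_insert(): inserters share a read lock, eviction drops it and
 * takes the write lock to stall every other inserter, then shared mode is
 * resumed.  The lock and helper names below are hypothetical, not
 * rmlock(9) calls.
 */
#if 0	/* example only */
static void
insert_with_eviction_fallback(void *pvo, void *pte)
{
	read_lock(&eviction_lock);		/* normal fast path */
	if (try_insert(pvo, pte) == -1) {
		/* rm read locks cannot be upgraded in place: drop first. */
		read_unlock(&eviction_lock);
		write_lock(&eviction_lock);	/* exclude all inserters */
		evict_and_insert(pvo, pte);
		write_unlock(&eviction_lock);
		read_lock(&eviction_lock);	/* back to shared mode */
	}
	read_unlock(&eviction_lock);
}
#endif

/*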
*/ /* Lock out all insertions for a bit */ rm_runlock(&mphyp_eviction_lock, &track); rm_wlock(&mphyp_eviction_lock); mphyp_pte_evict_and_insert_locked(pvo, &pte); rm_wunlock(&mphyp_eviction_lock); /* All clear */ rm_rlock(&mphyp_eviction_lock, &track); } } rm_runlock(&mphyp_eviction_lock, &track); return (0); } static int64_t mphyp_pte_replace_sp(struct pvo_entry *pvo) { int64_t refchg; refchg = mphyp_pte_unset_sp(pvo); mphyp_pte_insert_sp(pvo); return (refchg); }
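
/*
 * A minimal illustrative sketch (hypothetical names): the superpage
 * methods above walk every constituent PTE of one superpage and fold the
 * referenced/changed bits into a single summary, which is what lets
 * mphyp_pte_replace_sp() return the refchg gathered by the unset pass.
 */
#if 0	/* example only */
#include <stdint.h>

static uint64_t
fold_refchg(uint64_t (*unset_one)(uint64_t), uint64_t va, uint64_t sp_size,
    uint64_t pg_size)
{
	uint64_t refchg = 0, eva = va + sp_size;

	/* Flat iteration standing in for the RB_NEXT() pvo_tree walk. */
	for (; va < eva; va += pg_size)
		refchg |= unset_one(va);
	return (refchg);
}
#endif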