Index: head/sys/alpha/alpha/pmap.c
===================================================================
--- head/sys/alpha/alpha/pmap.c	(revision 101345)
+++ head/sys/alpha/alpha/pmap.c	(revision 101346)
@@ -1,3265 +1,3258 @@
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *	with some ideas from NetBSD's alpha pmap
 * $FreeBSD$
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps. These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

/*
 * Notes for alpha pmap.
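 *
 * As a rough sketch of the three-level lookup (assuming the usual
 * 8K-byte pages and 10-bit table indices, i.e. ALPHA_L3SHIFT == 13,
 * ALPHA_L2SHIFT == 23 and ALPHA_L1SHIFT == 33), a virtual address
 * breaks down as
 *
 *	lev1 index = (va >> 33) & 0x3ff		selects a lev2 table
 *	lev2 index = (va >> 23) & 0x3ff		selects a lev3 table
 *	lev3 index = (va >> 13) & 0x3ff		selects the physical page
 *	offset     =  va & 0x1fff		byte within the page
 *
 * so one lev3 page table maps 8M of address space and one lev2 page
 * table maps 8G.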
* * On alpha, pm_pdeobj will hold lev1, lev2 and lev3 page tables. * Indices from 0 to NUSERLEV3MAPS-1 will map user lev3 page tables, * indices from NUSERLEV3MAPS to NUSERLEV3MAPS+NUSERLEV2MAPS-1 will * map user lev2 page tables and index NUSERLEV3MAPS+NUSERLEV2MAPS * will map the lev1 page table. The lev1 table will self map at * address VADDR(PTLEV1I,0,0). * * The vm_object kptobj holds the kernel page tables on i386 (62 or 63 * of them, depending on whether the system is SMP). On alpha, kptobj * will hold the lev3 and lev2 page tables for K1SEG. Indices 0 to * NKLEV3MAPS-1 will map kernel lev3 page tables and indices * NKLEV3MAPS to NKLEV3MAPS+NKLEV2MAPS will map lev2 page tables. (XXX * should the kernel Lev1map be inserted into this object?). * * pvtmmap is not needed for alpha since K0SEG maps all of physical * memory. * * * alpha virtual memory map: * * * Address Lev1 index * * --------------------------------- * 0000000000000000 | | 0 * | | * | | * | | * | | * --- --- * User space (USEG) * --- --- * | | * | | * | | * | | * 000003ffffffffff | | 511=UMAXLEV1I * --------------------------------- * fffffc0000000000 | | 512=K0SEGLEV1I * | Kernel code/data/bss | * | | * | | * | | * --- --- * K0SEG * --- --- * | | * | 1-1 physical/virtual | * | | * | | * fffffdffffffffff | | * --------------------------------- * fffffe0000000000 | | 768=K1SEGLEV1I * | Kernel dynamic data | * | | * | | * | | * --- --- * K1SEG * --- --- * | | * | mapped by ptes | * | | * | | * fffffff7ffffffff | | * --------------------------------- * fffffffe00000000 | | 1023=PTLEV1I * | PTmap (pte self map) | * ffffffffffffffff | | * --------------------------------- * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if defined(DIAGNOSTIC) #define PMAP_DIAGNOSTIC #endif #define MINPV 2048 #if 0 #define PMAP_DIAGNOSTIC #define PMAP_DEBUG #endif #if !defined(PMAP_DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif /* * Some macros for manipulating virtual addresses */ #define ALPHA_L1SIZE (1L << ALPHA_L1SHIFT) #define ALPHA_L2SIZE (1L << ALPHA_L2SHIFT) #define alpha_l1trunc(va) ((va) & ~(ALPHA_L1SIZE-1)) #define alpha_l2trunc(va) ((va) & ~(ALPHA_L2SIZE-1)) /* * Get PDEs and PTEs for user/kernel address space */ #define pmap_pte_w(pte) ((*(pte) & PG_W) != 0) #define pmap_pte_managed(pte) ((*(pte) & PG_MANAGED) != 0) #define pmap_pte_v(pte) ((*(pte) & PG_V) != 0) #define pmap_pte_pa(pte) alpha_ptob(ALPHA_PTE_TO_PFN(*(pte))) #define pmap_pte_prot(pte) (*(pte) & PG_PROT) #define pmap_pte_set_w(pte, v) ((v)?(*pte |= PG_W):(*pte &= ~PG_W)) #define pmap_pte_set_prot(pte, v) ((*pte &= ~PG_PROT), (*pte |= (v))) /* * Given a map and a machine independent protection code, * convert to an alpha protection code. */ #define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 
0 : 1][p]) int protection_codes[2][8]; /* * Return non-zero if this pmap is currently active */ #define pmap_isactive(pmap) (pmap->pm_active) /* * Extract level 1, 2 and 3 page table indices from a va */ #define PTMASK ((1 << ALPHA_PTSHIFT) - 1) #define pmap_lev1_index(va) (((va) >> ALPHA_L1SHIFT) & PTMASK) #define pmap_lev2_index(va) (((va) >> ALPHA_L2SHIFT) & PTMASK) #define pmap_lev3_index(va) (((va) >> ALPHA_L3SHIFT) & PTMASK) /* * Given a physical address, construct a pte */ #define pmap_phys_to_pte(pa) ALPHA_PTE_FROM_PFN(alpha_btop(pa)) /* * Given a page frame number, construct a k0seg va */ #define pmap_k0seg_to_pfn(va) alpha_btop(ALPHA_K0SEG_TO_PHYS(va)) /* * Given a pte, construct a k0seg va */ #define pmap_k0seg_to_pte(va) ALPHA_PTE_FROM_PFN(pmap_k0seg_to_pfn(va)) /* * Lev1map: * * Kernel level 1 page table. This maps all kernel level 2 * page table pages, and is used as a template for all user * pmap level 1 page tables. When a new user level 1 page * table is allocated, all Lev1map PTEs for kernel addresses * are copied to the new map. * * Lev2map: * * Initial set of kernel level 2 page table pages. These * map the kernel level 3 page table pages. As kernel * level 3 page table pages are added, more level 2 page * table pages may be added to map them. These pages are * never freed. * * Lev3map: * * Initial set of kernel level 3 page table pages. These * map pages in K1SEG. More level 3 page table pages may * be added at run-time if additional K1SEG address space * is required. These pages are never freed. * * Lev2mapsize: * * Number of entries in the initial Lev2map. * * Lev3mapsize: * * Number of entries in the initial Lev3map. * * NOTE: When mappings are inserted into the kernel pmap, all * level 2 and level 3 page table pages must already be allocated * and mapped into the parent page table. */ pt_entry_t *Lev1map, *Lev2map, *Lev3map; vm_size_t Lev2mapsize, Lev3mapsize; /* * Statically allocated kernel pmap */ struct pmap kernel_pmap_store; vm_offset_t avail_start; /* PA of first available physical page */ vm_offset_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? 
*/ static vm_object_t kptobj; static int nklev3, nklev2; vm_offset_t kernel_vm_end; /* * Data for the ASN allocator */ static int pmap_maxasn; static pmap_t pmap_active[MAXCPU]; static LIST_HEAD(,pmap) allpmaps; static struct mtx allpmaps_lock; /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static struct vm_object pvzone_obj; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; static int pmap_pagedaemon_waken = 0; static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(void); static void alpha_protection_init(void); static void pmap_changebit(vm_page_t m, int bit, boolean_t setem); static void pmap_remove_all(vm_page_t m); static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte); static int pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva); static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); static int pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va); static int pmap_release_free_page(pmap_t pmap, vm_page_t p); static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex); static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex); static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t); static void *pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); #ifdef SMP static void pmap_invalidate_page_action(void *arg); static void pmap_invalidate_all_action(void *arg); #endif /* * Routine: pmap_lev1pte * Function: * Extract the level 1 page table entry associated * with the given map/virtual_address pair. */ static PMAP_INLINE pt_entry_t* pmap_lev1pte(pmap_t pmap, vm_offset_t va) { if (!pmap) return 0; return &pmap->pm_lev1[pmap_lev1_index(va)]; } /* * Routine: pmap_lev2pte * Function: * Extract the level 2 page table entry associated * with the given map/virtual_address pair. */ static PMAP_INLINE pt_entry_t* pmap_lev2pte(pmap_t pmap, vm_offset_t va) { pt_entry_t* l1pte; pt_entry_t* l2map; l1pte = pmap_lev1pte(pmap, va); if (!pmap_pte_v(l1pte)) return 0; l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); return &l2map[pmap_lev2_index(va)]; } /* * Routine: pmap_lev3pte * Function: * Extract the level 3 page table entry associated * with the given map/virtual_address pair. */ static PMAP_INLINE pt_entry_t* pmap_lev3pte(pmap_t pmap, vm_offset_t va) { pt_entry_t* l2pte; pt_entry_t* l3map; l2pte = pmap_lev2pte(pmap, va); if (!l2pte || !pmap_pte_v(l2pte)) return 0; l3map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte)); return &l3map[pmap_lev3_index(va)]; } vm_offset_t pmap_steal_memory(vm_size_t size) { vm_size_t bank_size; vm_offset_t pa, va; size = round_page(size); bank_size = phys_avail[1] - phys_avail[0]; while (size > bank_size) { int i; for (i = 0; phys_avail[i+2]; i+= 2) { phys_avail[i] = phys_avail[i+2]; phys_avail[i+1] = phys_avail[i+3]; } phys_avail[i] = 0; phys_avail[i+1] = 0; if (!phys_avail[0]) panic("pmap_steal_memory: out of memory"); bank_size = phys_avail[1] - phys_avail[0]; } pa = phys_avail[0]; phys_avail[0] += size; va = ALPHA_PHYS_TO_K0SEG(pa); bzero((caddr_t) va, size); return va; } extern pt_entry_t rom_pte; /* XXX */ extern int prom_mapped; /* XXX */ /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_offset_t ptaddr, u_int maxasn) { pt_entry_t newpte; int i; /* * Setup ASNs. 
PCPU_GET(next_asn) and PCPU_GET(current_asngen) are set * up already. */ pmap_maxasn = maxasn; /* * Allocate a level 1 map for the kernel. */ Lev1map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE); /* * Allocate a level 2 map for the kernel */ Lev2map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE); Lev2mapsize = PAGE_SIZE; /* * Allocate some level 3 maps for the kernel */ Lev3map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE*NKPT); Lev3mapsize = NKPT * PAGE_SIZE; /* Map all of the level 2 maps */ for (i = 0; i < howmany(Lev2mapsize, PAGE_SIZE); i++) { unsigned long pfn = pmap_k0seg_to_pfn((vm_offset_t) Lev2map) + i; newpte = ALPHA_PTE_FROM_PFN(pfn); newpte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_W; Lev1map[K1SEGLEV1I + i] = newpte; } /* Setup the mapping for the prom console */ { if (pmap_uses_prom_console()) { /* XXX save old pte so that we can remap prom if necessary */ rom_pte = *(pt_entry_t *)ptaddr & ~PG_ASM; /* XXX */ } prom_mapped = 0; /* * Actually, this code lies. The prom is still mapped, and will * remain so until the context switch after alpha_init() returns. * Printfs using the firmware before then will end up frobbing * Lev1map unnecessarily, but that's OK. */ } /* * Level 1 self mapping. * * Don't set PG_ASM since the self-mapping is different for each * address space. */ newpte = pmap_k0seg_to_pte((vm_offset_t) Lev1map); newpte |= PG_V | PG_KRE | PG_KWE; Lev1map[PTLEV1I] = newpte; /* Map all of the level 3 maps */ for (i = 0; i < howmany(Lev3mapsize, PAGE_SIZE); i++) { unsigned long pfn = pmap_k0seg_to_pfn((vm_offset_t) Lev3map) + i; newpte = ALPHA_PTE_FROM_PFN(pfn); newpte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_W; Lev2map[i] = newpte; } avail_start = phys_avail[0]; for (i = 0; phys_avail[i+2]; i+= 2) ; avail_end = phys_avail[i+1]; virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VPTBASE; /* * Initialize protection array. */ alpha_protection_init(); /* * Initialize the kernel pmap (which is statically allocated). */ kernel_pmap->pm_lev1 = Lev1map; kernel_pmap->pm_active = ~0; kernel_pmap->pm_asn[alpha_pal_whami()].asn = 0; kernel_pmap->pm_asn[alpha_pal_whami()].gen = 1; TAILQ_INIT(&kernel_pmap->pm_pvlist); nklev3 = NKPT; nklev2 = 1; /* * Initialize list of pmaps. */ LIST_INIT(&allpmaps); LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); /* * Set up proc0's PCB such that the ptbr points to the right place * and has the kernel pmap's. */ thread0.td_pcb->pcb_hw.apcb_ptbr = ALPHA_K0SEG_TO_PHYS((vm_offset_t)Lev1map) >> PAGE_SHIFT; thread0.td_pcb->pcb_hw.apcb_asn = 0; } int pmap_uses_prom_console() { int cputype; cputype = hwrpb->rpb_type; return (cputype == ST_DEC_21000 || ST_DEC_4100); return 0; } static void * pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { *flags = UMA_SLAB_PRIV; return (void *)kmem_alloc(kernel_map, bytes); } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. * pmap_init has been enhanced to support in a fairly consistant * way, discontiguous physical memory. */ void pmap_init(phys_start, phys_end) vm_offset_t phys_start, phys_end; { int i; int initial_pvs; /* * Allocate memory for random pmap data structures. Includes the * pv_head_table. 
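 *
 * Each vm_page gets an empty md.pv_list here; every managed mapping
 * that is later entered adds a pv_entry to the list for its page, so
 * reverse lookups (vm_page -> all of its mappings) are a simple list
 * walk, roughly:
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 *		pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va);
 *		... examine or tear down *pte ...
 *	}
 *
 * as pmap_remove_all() and pmap_ts_referenced() do below.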
*/ for(i = 0; i < vm_page_array_size; i++) { vm_page_t m; m = &vm_page_array[i]; TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; m->md.pv_flags = 0; } /* * init the pv free list */ initial_pvs = vm_page_array_size; if (initial_pvs < MINPV) initial_pvs = MINPV; pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pvzone, pmap_allocf); uma_prealloc(pvzone, initial_pvs); /* * object for kernel page table pages */ kptobj = vm_object_allocate(OBJT_DEFAULT, NKLEV3MAPS + NKLEV2MAPS); /* * Now it is safe to enable pv_table recording. */ pmap_initialized = TRUE; } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2() { int shpgperproc = PMAP_SHPGPERPROC; TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); } /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ static void pmap_invalidate_asn(pmap_t pmap) { pmap->pm_asn[PCPU_GET(cpuid)].gen = 0; } struct pmap_invalidate_page_arg { pmap_t pmap; vm_offset_t va; }; static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { #ifdef SMP struct pmap_invalidate_page_arg arg; arg.pmap = pmap; arg.va = va; smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *) &arg); } static void pmap_invalidate_page_action(void *arg) { pmap_t pmap = ((struct pmap_invalidate_page_arg *) arg)->pmap; vm_offset_t va = ((struct pmap_invalidate_page_arg *) arg)->va; #endif if (pmap->pm_active & PCPU_GET(cpumask)) { ALPHA_TBIS(va); alpha_pal_imb(); /* XXX overkill? */ } else { pmap_invalidate_asn(pmap); } } static void pmap_invalidate_all(pmap_t pmap) { #ifdef SMP smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *) pmap); } static void pmap_invalidate_all_action(void *arg) { pmap_t pmap = (pmap_t) arg; #endif if (pmap->pm_active & PCPU_GET(cpumask)) { ALPHA_TBIA(); alpha_pal_imb(); /* XXX overkill? */ } else pmap_invalidate_asn(pmap); } static void pmap_get_asn(pmap_t pmap) { if (PCPU_GET(next_asn) > pmap_maxasn) { /* * Start a new ASN generation. * * Invalidate all per-process mappings and I-cache */ PCPU_SET(next_asn, 0); PCPU_SET(current_asngen, (PCPU_GET(current_asngen) + 1) & ASNGEN_MASK); if (PCPU_GET(current_asngen) == 0) { /* * Clear the pm_asn[].gen of all pmaps. * This is safe since it is only called from * pmap_activate after it has deactivated * the old pmap and it only affects this cpu. */ pmap_t tpmap; #ifdef PMAP_DIAGNOSTIC printf("pmap_get_asn: generation rollover\n"); #endif PCPU_SET(current_asngen, 1); mtx_lock_spin(&allpmaps_lock); LIST_FOREACH(tpmap, &allpmaps, pm_list) { tpmap->pm_asn[PCPU_GET(cpuid)].gen = 0; } mtx_unlock_spin(&allpmaps_lock); } /* * Since we are about to start re-using ASNs, we must * clear out the TLB and the I-cache since they are tagged * with the ASN. */ ALPHA_TBIAP(); alpha_pal_imb(); /* XXX overkill? */ } pmap->pm_asn[PCPU_GET(cpuid)].asn = PCPU_GET(next_asn); PCPU_SET(next_asn, PCPU_GET(next_asn) + 1); pmap->pm_asn[PCPU_GET(cpuid)].gen = PCPU_GET(current_asngen); } /*************************************************** * Low level helper routines..... 
***************************************************/ /* * this routine defines the region(s) of memory that should * not be tested for the modified bit. */ static PMAP_INLINE int pmap_track_modified(vm_offset_t va) { if ((va < kmi.clean_sva) || (va >= kmi.clean_eva)) return 1; else return 0; } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_offset_t pmap_extract(pmap, va) register pmap_t pmap; vm_offset_t va; { pt_entry_t* pte = pmap_lev3pte(pmap, va); if (pte) return alpha_ptob(ALPHA_PTE_TO_PFN(*pte)); else return 0; } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; pt_entry_t *pte; for (i = 0; i < count; i++) { vm_offset_t tva = va + i * PAGE_SIZE; pt_entry_t npte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m[i])) | PG_ASM | PG_KRE | PG_KWE | PG_V; pt_entry_t opte; pte = vtopte(tva); opte = *pte; *pte = npte; if (opte) pmap_invalidate_page(kernel_pmap, tva); } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(va, count) vm_offset_t va; int count; { int i; register pt_entry_t *pte; for (i = 0; i < count; i++) { pte = vtopte(va); *pte = 0; pmap_invalidate_page(kernel_pmap, va); va += PAGE_SIZE; } } /* * add a wired page to the kva * note that in order for the mapping to take effect -- you * should do a invltlb after doing the pmap_kenter... */ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_offset_t pa) { pt_entry_t *pte; pt_entry_t npte, opte; npte = pmap_phys_to_pte(pa) | PG_ASM | PG_KRE | PG_KWE | PG_V; pte = vtopte(va); opte = *pte; *pte = npte; if (opte) pmap_invalidate_page(kernel_pmap, va); } /* * remove a page from the kernel pagetables */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { register pt_entry_t *pte; pte = vtopte(va); *pte = 0; pmap_invalidate_page(kernel_pmap, va); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { return ALPHA_PHYS_TO_K0SEG(start); } static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex) { vm_page_t m; retry: m = vm_page_lookup(object, pindex); if (m && vm_page_sleep_busy(m, FALSE, "pplookp")) goto retry; return m; } /* * Create the kernel stack for a new thread. * This routine directly affects the fork perf for a process and thread. 
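 *
 * The stack pages are entered much as pmap_kenter() would do it: each
 * level 3 pte is built directly as, roughly,
 *
 *	pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
 *	    | PG_ASM | PG_KRE | PG_KWE | PG_V;
 *
 * i.e. a valid, kernel read/write, address-space-match mapping, and
 * any previous translation is flushed with pmap_invalidate_page().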
*/ void pmap_new_thread(struct thread *td) { int i; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; pt_entry_t *ptek, oldpte; /* * allocate object for the kstack */ ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; #ifdef KSTACK_GUARD /* get a kernel virtual address for the kstack for this thread */ ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE); if (ks == NULL) panic("pmap_new_thread: kstack allocation failed"); /* Set the first page to be the unmapped guard page. */ ptek = vtopte(ks); oldpte = *ptek; *ptek = 0; if (oldpte) pmap_invalidate_page(kernel_pmap, ks); /* move to the next page, which is where the real stack starts. */ ks += PAGE_SIZE; td->td_kstack = ks; ptek++; #else /* get a kernel virtual address for the kstack for this thread */ ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE); if (ks == NULL) panic("pmap_new_thread: kstack allocation failed"); td->td_kstack = ks; ptek = vtopte(ks); #endif /* * For the length of the stack, link in a real page of ram for each * page of stack. */ for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page */ - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); + m = vm_page_grab(ksobj, i, + VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); /* - * Wire the page - */ - m->wire_count++; - cnt.v_wire_count++; - - /* * Enter the page into the kernel address space. */ oldpte = ptek[i]; ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_ASM | PG_KRE | PG_KWE | PG_V; if (oldpte) pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } /* * Dispose the kernel stack for a thread that has exited. * This routine directly impacts the exit perf of a thread. */ void pmap_dispose_thread(td) struct thread *td; { int i; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; pt_entry_t *ptek; ksobj = td->td_kstack_obj; ks = td->td_kstack; ptek = vtopte(ks); for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_dispose_thread: kstack already missing?"); vm_page_busy(m); ptek[i] = 0; pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE); vm_page_lock_queues(); vm_page_unwire(m, 0); vm_page_free(m); vm_page_unlock_queues(); } /* * Free the space that this stack was mapped to in the kernel * address map. */ #ifdef KSTACK_GUARD kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE); #else kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE); #endif vm_object_deallocate(ksobj); } /* * Allow the kernel stack for a thread to be prejudicially paged out. */ void pmap_swapout_thread(td) struct thread *td; { int i; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; /* * Make sure we aren't fpcurthread. */ alpha_fpstate_save(td, 1); ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_swapout_thread: kstack already missing?"); vm_page_lock_queues(); vm_page_dirty(m); vm_page_unwire(m, 0); vm_page_unlock_queues(); pmap_kremove(ks + i * PAGE_SIZE); } } /* * Bring the kernel stack for a specified thread back in. 
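 *
 * In outline, each stack page is grabbed from the kstack object,
 * mapped with pmap_kenter(), paged in if it is not fully valid, and
 * then wired:
 *
 *	m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
 *	if (m->valid != VM_PAGE_BITS_ALL)
 *		(void) vm_pager_get_pages(ksobj, &m, 1, 0);
 *	vm_page_wire(m);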
*/ void pmap_swapin_thread(td) struct thread *td; { int i, rv; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); if (m->valid != VM_PAGE_BITS_ALL) { rv = vm_pager_get_pages(ksobj, &m, 1, 0); if (rv != VM_PAGER_OK) panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid); m = vm_page_lookup(ksobj, i); m->valid = VM_PAGE_BITS_ALL; } vm_page_lock_queues(); vm_page_wire(m); vm_page_wakeup(m); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); vm_page_unlock_queues(); } /* * The pcb may be at a different physical address now so cache the * new address. */ td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb); } /*************************************************** * Page table page management routines..... ***************************************************/ /* * This routine unholds page table pages, and if the hold count * drops to zero, then it decrements the wire count. */ static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) { while (vm_page_sleep_busy(m, FALSE, "pmuwpt")) ; if (m->hold_count == 0) { vm_offset_t pteva; pt_entry_t* pte; /* * unmap the page table page */ if (m->pindex >= NUSERLEV3MAPS) { /* Level 2 page table */ pte = pmap_lev1pte(pmap, va); pteva = (vm_offset_t) PTlev2 + alpha_ptob(m->pindex - NUSERLEV3MAPS); } else { /* Level 3 page table */ pte = pmap_lev2pte(pmap, va); pteva = (vm_offset_t) PTmap + alpha_ptob(m->pindex); } *pte = 0; if (m->pindex < NUSERLEV3MAPS) { /* unhold the level 2 page table */ vm_page_t lev2pg; lev2pg = pmap_page_lookup(pmap->pm_pteobj, NUSERLEV3MAPS + pmap_lev1_index(va)); vm_page_unhold(lev2pg); if (lev2pg->hold_count == 0) _pmap_unwire_pte_hold(pmap, va, lev2pg); } --pmap->pm_stats.resident_count; /* * Do a invltlb to make the invalidated mapping * take effect immediately. */ pmap_invalidate_page(pmap, pteva); if (pmap->pm_ptphint == m) pmap->pm_ptphint = NULL; /* * If the page is finally unwired, simply free it. */ --m->wire_count; if (m->wire_count == 0) { vm_page_flash(m); vm_page_busy(m); vm_page_free_zero(m); --cnt.v_wire_count; } return 1; } return 0; } static PMAP_INLINE int pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) { vm_page_unhold(m); if (m->hold_count == 0) return _pmap_unwire_pte_hold(pmap, va, m); else return 0; } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte) { unsigned ptepindex; if (va >= VM_MAXUSER_ADDRESS) return 0; if (mpte == NULL) { ptepindex = (va >> ALPHA_L2SHIFT); if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { mpte = pmap->pm_ptphint; } else { mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex); pmap->pm_ptphint = mpte; } } return pmap_unwire_pte_hold(pmap, va, mpte); } void pmap_pinit0(pmap) struct pmap *pmap; { int i; pmap->pm_lev1 = Lev1map; pmap->pm_ptphint = NULL; pmap->pm_active = 0; for (i = 0; i < MAXCPU; i++) { pmap->pm_asn[i].asn = 0; pmap->pm_asn[i].gen = 0; } TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN | MTX_QUIET); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. 
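 *
 * The level 1 page is kept at index NUSERLEV3MAPS + NUSERLEV2MAPS in
 * pm_pteobj and is given a self-reference (without PG_ASM, since the
 * self map differs per address space) so that the recursive PTmap
 * works for this pmap, roughly:
 *
 *	pmap->pm_lev1[PTLEV1I] =
 *	    pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg)) |
 *	    PG_V | PG_KRE | PG_KWE;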
*/ void pmap_pinit(pmap) register struct pmap *pmap; { vm_page_t lev1pg; int i; /* * allocate object for the ptes */ if (pmap->pm_pteobj == NULL) pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, NUSERLEV3MAPS + NUSERLEV2MAPS + 1); /* * allocate the page directory page */ lev1pg = vm_page_grab(pmap->pm_pteobj, NUSERLEV3MAPS + NUSERLEV2MAPS, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); lev1pg->wire_count = 1; ++cnt.v_wire_count; vm_page_flag_clear(lev1pg, PG_MAPPED | PG_BUSY); /* not mapped normally */ lev1pg->valid = VM_PAGE_BITS_ALL; pmap->pm_lev1 = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(lev1pg)); if ((lev1pg->flags & PG_ZERO) == 0) bzero(pmap->pm_lev1, PAGE_SIZE); /* install self-referential address mapping entry (not PG_ASM) */ pmap->pm_lev1[PTLEV1I] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg)) | PG_V | PG_KRE | PG_KWE; pmap->pm_ptphint = NULL; pmap->pm_active = 0; for (i = 0; i < MAXCPU; i++) { pmap->pm_asn[i].asn = 0; pmap->pm_asn[i].gen = 0; } TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); mtx_lock_spin(&allpmaps_lock); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); } /* * Wire in kernel global address entries. To avoid a race condition * between pmap initialization and pmap_growkernel, this procedure * should be called after the vmspace is attached to the process * but before this pmap is activated. */ void pmap_pinit2(pmap) struct pmap *pmap; { bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE); } static int pmap_release_free_page(pmap_t pmap, vm_page_t p) { pt_entry_t* pte; pt_entry_t* l2map; if (p->pindex >= NUSERLEV3MAPS + NUSERLEV2MAPS) /* level 1 page table */ pte = &pmap->pm_lev1[PTLEV1I]; else if (p->pindex >= NUSERLEV3MAPS) /* level 2 page table */ pte = &pmap->pm_lev1[p->pindex - NUSERLEV3MAPS]; else { /* level 3 page table */ pte = &pmap->pm_lev1[p->pindex >> ALPHA_PTSHIFT]; l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(pte)); pte = &l2map[p->pindex & ((1 << ALPHA_PTSHIFT) - 1)]; } /* * This code optimizes the case of freeing non-busy * page-table pages. Those pages are zero now, and * might as well be placed directly into the zero queue. */ if (vm_page_sleep_busy(p, FALSE, "pmaprl")) return 0; vm_page_busy(p); /* * Remove the page table page from the processes address space. */ *pte = 0; pmap->pm_stats.resident_count--; #ifdef PMAP_DEBUG if (p->hold_count) { panic("pmap_release: freeing held page table page"); } #endif /* * Level1 pages need to have the kernel * stuff cleared, so they can go into the zero queue also. */ if (p->pindex == NUSERLEV3MAPS + NUSERLEV2MAPS) bzero(pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE); if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex)) pmap->pm_ptphint = NULL; #ifdef PMAP_DEBUG { u_long *lp = (u_long*) ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(p)); u_long *ep = (u_long*) ((char*) lp + PAGE_SIZE); for (; lp < ep; lp++) if (*lp != 0) panic("pmap_release_free_page: page not zero"); } #endif p->wire_count--; cnt.v_wire_count--; vm_page_free_zero(p); return 1; } /* * this routine is called if the page table page is not * mapped correctly. 
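 *
 * Page table pages are reference counted two ways: wire_count keeps
 * the page itself from being reclaimed while it is in use, and
 * hold_count tracks how many mappings (or child page tables) still
 * depend on it.  Allocation bumps both; pmap_unuse_pt() and
 * _pmap_unwire_pte_hold() drop the hold, and once it reaches zero
 * the parent pte is cleared and the page is unwired and freed.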
*/ static vm_page_t _pmap_allocpte(pmap, ptepindex) pmap_t pmap; unsigned ptepindex; { pt_entry_t* pte; vm_offset_t ptepa; vm_page_t m; /* * Find or fabricate a new pagetable page */ m = vm_page_grab(pmap->pm_pteobj, ptepindex, VM_ALLOC_ZERO | VM_ALLOC_RETRY); KASSERT(m->queue == PQ_NONE, ("_pmap_allocpte: %p->queue != PQ_NONE", m)); if (m->wire_count == 0) cnt.v_wire_count++; m->wire_count++; /* * Increment the hold count for the page table page * (denoting a new mapping.) */ m->hold_count++; /* * Map the pagetable page into the process address space, if * it isn't already there. */ pmap->pm_stats.resident_count++; ptepa = VM_PAGE_TO_PHYS(m); if (ptepindex >= NUSERLEV3MAPS) { pte = &pmap->pm_lev1[ptepindex - NUSERLEV3MAPS]; } else { int l1index = ptepindex >> ALPHA_PTSHIFT; pt_entry_t* l1pte = &pmap->pm_lev1[l1index]; pt_entry_t* l2map; if (!pmap_pte_v(l1pte)) _pmap_allocpte(pmap, NUSERLEV3MAPS + l1index); else { vm_page_t l2page = pmap_page_lookup(pmap->pm_pteobj, NUSERLEV3MAPS + l1index); l2page->hold_count++; } l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); pte = &l2map[ptepindex & ((1 << ALPHA_PTSHIFT) - 1)]; } *pte = pmap_phys_to_pte(ptepa) | PG_KRE | PG_KWE | PG_V; /* * Set the page table hint */ pmap->pm_ptphint = m; if ((m->flags & PG_ZERO) == 0) bzero((caddr_t) ALPHA_PHYS_TO_K0SEG(ptepa), PAGE_SIZE); m->valid = VM_PAGE_BITS_ALL; vm_page_flag_clear(m, PG_ZERO); vm_page_flag_set(m, PG_MAPPED); vm_page_wakeup(m); return m; } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va) { unsigned ptepindex; pt_entry_t* lev2pte; vm_page_t m; /* * Calculate pagetable page index */ ptepindex = va >> (PAGE_SHIFT + ALPHA_PTSHIFT); /* * Get the level2 entry */ lev2pte = pmap_lev2pte(pmap, va); /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (lev2pte && pmap_pte_v(lev2pte)) { /* * In order to get the page table page, try the * hint first. */ if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { m = pmap->pm_ptphint; } else { m = pmap_page_lookup(pmap->pm_pteobj, ptepindex); pmap->pm_ptphint = m; } m->hold_count++; return m; } /* * Here if the pte page isn't mapped, or if it has been deallocated. */ return _pmap_allocpte(pmap, ptepindex); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { vm_page_t p,n,lev1pg; vm_object_t object = pmap->pm_pteobj; int curgeneration; #if defined(DIAGNOSTIC) if (object->ref_count != 1) panic("pmap_release: pteobj reference count != 1"); #endif lev1pg = NULL; retry: curgeneration = object->generation; for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) { n = TAILQ_NEXT(p, listq); if (p->pindex >= NUSERLEV3MAPS) { continue; } while (1) { if (!pmap_release_free_page(pmap, p) && (object->generation != curgeneration)) goto retry; } } for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) { n = TAILQ_NEXT(p, listq); if (p->pindex < NUSERLEV3MAPS) { /* can this happen? 
maybe panic */ goto retry; } if (p->pindex >= NUSERLEV3MAPS + NUSERLEV2MAPS) { lev1pg = p; continue; } while (1) { if (!pmap_release_free_page(pmap, p) && (object->generation != curgeneration)) goto retry; } } if (lev1pg && !pmap_release_free_page(pmap, lev1pg)) goto retry; mtx_lock_spin(&allpmaps_lock); LIST_REMOVE(pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { /* XXX come back to this */ struct pmap *pmap; pt_entry_t* pte; pt_entry_t newlev1, newlev2; vm_offset_t pa; vm_page_t nkpg; critical_enter(); if (kernel_vm_end == 0) { kernel_vm_end = VM_MIN_KERNEL_ADDRESS;; /* Count the level 2 page tables */ nklev2 = 0; nklev3 = 0; while (pmap_pte_v(pmap_lev1pte(kernel_pmap, kernel_vm_end))) { nklev2++; nklev3 += (1L << ALPHA_PTSHIFT); kernel_vm_end += ALPHA_L1SIZE; } /* Count the level 3 page tables in the last level 2 page table */ kernel_vm_end -= ALPHA_L1SIZE; nklev3 -= (1 << ALPHA_PTSHIFT); while (pmap_pte_v(pmap_lev2pte(kernel_pmap, kernel_vm_end))) { nklev3++; kernel_vm_end += ALPHA_L2SIZE; } } addr = (addr + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); while (kernel_vm_end < addr) { /* * If the level 1 pte is invalid, allocate a new level 2 page table */ pte = pmap_lev1pte(kernel_pmap, kernel_vm_end); if (!pmap_pte_v(pte)) { int pindex = NKLEV3MAPS + pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I; nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); printf("pmap_growkernel: growing to %lx\n", addr); printf("pmap_growkernel: adding new level2 page table\n"); nklev2++; pmap_zero_page(nkpg); pa = VM_PAGE_TO_PHYS(nkpg); newlev1 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; mtx_lock_spin(&allpmaps_lock); LIST_FOREACH(pmap, &allpmaps, pm_list) { *pmap_lev1pte(pmap, kernel_vm_end) = newlev1; } mtx_unlock_spin(&allpmaps_lock); *pte = newlev1; pmap_invalidate_all(kernel_pmap); } /* * If the level 2 pte is invalid, allocate a new level 3 page table */ pte = pmap_lev2pte(kernel_pmap, kernel_vm_end); if (pmap_pte_v(pte)) { kernel_vm_end = (kernel_vm_end + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); continue; } /* * This index is bogus, but out of the way */ nkpg = vm_page_alloc(kptobj, nklev3, VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); nklev3++; pmap_zero_page(nkpg); pa = VM_PAGE_TO_PHYS(nkpg); newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; *pte = newlev2; kernel_vm_end = (kernel_vm_end + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); } critical_exit(); } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. */ static pv_entry_t get_pv_entry(void) { pv_entry_count++; if (pv_entry_high_water && (pv_entry_count > pv_entry_high_water) && (pmap_pagedaemon_waken == 0)) { pmap_pagedaemon_waken = 1; wakeup (&vm_pages_needed); } return uma_zalloc(pvzone, M_NOWAIT); } /* * This routine is very drastic, but can save the system * in a pinch. 
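 *
 * It is tied to the pv_entry high water mark set in pmap_init2()
 * (9/10 of pv_entry_max, which defaults to PMAP_SHPGPERPROC * maxproc
 * plus vm_page_array_size): once get_pv_entry() crosses that mark it
 * wakes the pagedaemon, which is expected to end up here so that
 * mappings of idle pages are stripped and their pv entries recycled.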
*/ void pmap_collect() { int i; vm_page_t m; static int warningdone = 0; if (pmap_pagedaemon_waken == 0) return; if (warningdone < 5) { printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n"); warningdone++; } for(i = 0; i < vm_page_array_size; i++) { m = &vm_page_array[i]; if (m->wire_count || m->hold_count || m->busy || (m->flags & (PG_BUSY | PG_UNMANAGED))) continue; pmap_remove_all(m); } pmap_pagedaemon_waken = 0; } /* * If it is the first entry on the list, it is actually * in the header and we must copy the following entry up * to the header. Otherwise we must search the list for * the entry. In either case we free the now unused entry. */ static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { pv_entry_t pv; int rtval; int s; s = splvm(); if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } rtval = 0; if (pv) { rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_FIRST(&m->md.pv_list) == NULL) vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } splx(s); return rtval; } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m) { int s; pv_entry_t pv; s = splvm(); pv = get_pv_entry(); pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; splx(s); } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va) { pt_entry_t oldpte; vm_page_t m; oldpte = *ptq; *ptq = 0; if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(&oldpte)); if ((oldpte & PG_FOW) == 0) { if (pmap_track_modified(va)) vm_page_dirty(m); } if ((oldpte & PG_FOR) == 0) vm_page_flag_set(m, PG_REFERENCED); return pmap_remove_entry(pmap, m, va); } else { return pmap_unuse_pt(pmap, va, NULL); } return 0; } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va) { register pt_entry_t *ptq; ptq = pmap_lev3pte(pmap, va); /* * if there is no pte for this address, just skip it!!! */ if (!ptq || !pmap_pte_v(ptq)) return; /* * get a local va for mappings for this pmap. */ (void) pmap_remove_pte(pmap, ptq, va); pmap_invalidate_page(pmap, va); return; } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va, nva; if (pmap == NULL) return; if (pmap->pm_stats.resident_count == 0) return; /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. 
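 *
 * For larger ranges the loop below still visits the range page by
 * page, but an invalid level 1 pte lets it skip ahead to the next
 * ALPHA_L1SIZE boundary and an invalid level 2 pte to the next
 * ALPHA_L2SIZE boundary, so sparse address spaces are torn down
 * cheaply.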
*/ if (sva + PAGE_SIZE == eva) { pmap_remove_page(pmap, sva); return; } for (va = sva; va < eva; va = nva) { if (!pmap_pte_v(pmap_lev1pte(pmap, va))) { nva = alpha_l1trunc(va + ALPHA_L1SIZE); continue; } if (!pmap_pte_v(pmap_lev2pte(pmap, va))) { nva = alpha_l2trunc(va + ALPHA_L2SIZE); continue; } pmap_remove_page(pmap, va); nva = va + PAGE_SIZE; } } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ static void pmap_remove_all(vm_page_t m) { register pv_entry_t pv; pt_entry_t *pte, tpte; int nmodify; int s; nmodify = 0; #if defined(PMAP_DIAGNOSTIC) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m)); } #endif s = splvm(); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); pv->pv_pmap->pm_stats.resident_count--; if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m)) panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); tpte = *pte; *pte = 0; if (tpte & PG_W) pv->pv_pmap->pm_stats.wired_count--; /* * Update the vm_page_t clean and reference bits. */ if ((tpte & PG_FOW) == 0) { if (pmap_track_modified(pv->pv_va)) vm_page_dirty(m); } if ((tpte & PG_FOR) == 0) vm_page_flag_set(m, PG_REFERENCED); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); } vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); splx(s); } /* * Set the physical protection on the * specified range of this map as requested. */ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pt_entry_t* pte; int newprot; if (pmap == NULL) return; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if (prot & VM_PROT_WRITE) return; newprot = pte_prot(pmap, prot); if ((sva & PAGE_MASK) || (eva & PAGE_MASK)) panic("pmap_protect: unaligned addresses"); while (sva < eva) { /* * If level 1 pte is invalid, skip this segment */ pte = pmap_lev1pte(pmap, sva); if (!pmap_pte_v(pte)) { sva = alpha_l1trunc(sva) + ALPHA_L1SIZE; continue; } /* * If level 2 pte is invalid, skip this segment */ pte = pmap_lev2pte(pmap, sva); if (!pmap_pte_v(pte)) { sva = alpha_l2trunc(sva) + ALPHA_L2SIZE; continue; } /* * If level 3 pte is invalid, skip this page */ pte = pmap_lev3pte(pmap, sva); if (!pmap_pte_v(pte)) { sva += PAGE_SIZE; continue; } if (pmap_pte_prot(pte) != newprot) { pt_entry_t oldpte = *pte; vm_page_t m = NULL; if ((oldpte & PG_FOR) == 0) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); vm_page_flag_set(m, PG_REFERENCED); oldpte |= (PG_FOR | PG_FOE); } if ((oldpte & PG_FOW) == 0) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); if (pmap_track_modified(sva)) vm_page_dirty(m); oldpte |= PG_FOW; } oldpte = (oldpte & ~PG_PROT) | newprot; *pte = oldpte; pmap_invalidate_page(pmap, sva); } sva += PAGE_SIZE; } } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. 
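 *
 * Alpha has no hardware referenced/modified bits, so managed
 * mappings are entered with the fault-on bits set
 * (PG_FOR | PG_FOW | PG_FOE); the first read, write or execute
 * access traps into pmap_emulate_reference(), which records the
 * reference or modification and clears the corresponding fault-on
 * bit(s).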
* * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_offset_t pa; pt_entry_t *pte; vm_offset_t opa; pt_entry_t origpte, newpte; vm_page_t mpte; int managed; if (pmap == NULL) return; va &= ~PAGE_MASK; #ifdef PMAP_DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); #endif mpte = NULL; /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { mpte = pmap_allocpte(pmap, va); } pte = pmap_lev3pte(pmap, va); /* * Page Directory table entry not valid, we need a new PT page */ if (pte == NULL) { panic("pmap_enter: invalid kernel page tables pmap=%p, va=0x%lx\n", pmap, va); } origpte = *pte; pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK; managed = 0; opa = pmap_pte_pa(pte); /* * Mapping has not changed, must be protection or wiring change. */ if (origpte && (opa == pa)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if (wired && ((origpte & PG_W) == 0)) pmap->pm_stats.wired_count++; else if (!wired && (origpte & PG_W)) pmap->pm_stats.wired_count--; /* * Remove extra pte reference */ if (mpte) mpte->hold_count--; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { if ((origpte & PG_FOW) != PG_FOW && pmap_track_modified(va)) { vm_page_t om; om = PHYS_TO_VM_PAGE(opa); vm_page_dirty(om); } } managed = origpte & PG_MANAGED; goto validate; } /* * Mapping has changed, invalidate old range and fall through to * handle validating new mapping. */ if (opa) { int err; err = pmap_remove_pte(pmap, pte, va); if (err) panic("pmap_enter: pte vanished, va: 0x%lx", va); } /* * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) { pmap_insert_entry(pmap, va, mpte, m); managed |= PG_MANAGED; } /* * Increment counters */ pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. */ newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed; if (managed) { /* * Set up referenced/modified emulation for the new * mapping. Any old referenced/modified emulation * results for the old mapping will have been recorded * either in pmap_remove_pte() or above in the code * which handles protection and/or wiring changes. */ newpte |= (PG_FOR | PG_FOW | PG_FOE); } if (wired) newpte |= PG_W; /* * if the mapping or permission bits are different, we need * to update the pte. */ if (origpte != newpte) { *pte = newpte; if (origpte) pmap_invalidate_page(pmap, va); if (prot & VM_PROT_EXECUTE) alpha_pal_imb(); } } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * 5. Tlbflush is deferred to calling procedure. * 6. Page IS managed. * but is *MUCH* faster than pmap_enter... */ static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte) { register pt_entry_t *pte; /* * In the case that a page table page is not * resident, we are creating it here. 
*/ if (va < VM_MAXUSER_ADDRESS) { unsigned ptepindex; pt_entry_t* l2pte; /* * Calculate lev2 page index */ ptepindex = va >> ALPHA_L2SHIFT; if (mpte && (mpte->pindex == ptepindex)) { mpte->hold_count++; } else { retry: /* * Get the level 2 entry */ l2pte = pmap_lev2pte(pmap, va); /* * If the level 2 page table is mapped, we just increment * the hold count, and activate it. */ if (l2pte && pmap_pte_v(l2pte)) { if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { mpte = pmap->pm_ptphint; } else { mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex); pmap->pm_ptphint = mpte; } if (mpte == NULL) goto retry; mpte->hold_count++; } else { mpte = _pmap_allocpte(pmap, ptepindex); } } } else { mpte = NULL; } /* * This call to vtopte makes the assumption that we are * entering the page into the current pmap. In order to support * quick entry into any pmap, one would likely use pmap_pte_quick. * But that isn't as quick as vtopte. */ pte = vtopte(va); if (*pte) { if (mpte) pmap_unwire_pte_hold(pmap, va, mpte); alpha_pal_imb(); /* XXX overkill? */ return 0; } /* * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ pmap_insert_entry(pmap, va, mpte, m); /* * Increment counters */ pmap->pm_stats.resident_count++; /* * Now validate mapping with RO protection */ *pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | PG_MANAGED | PG_FOR | PG_FOE | PG_FOW; alpha_pal_imb(); /* XXX overkill? */ return mpte; } /* * Make temporary mapping for a physical address. This is called * during dump. */ void * pmap_kenter_temporary(vm_offset_t pa, int i) { return (void *) ALPHA_PHYS_TO_K0SEG(pa - (i * PAGE_SIZE)); } #define MAX_INIT_PT (96) /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { vm_offset_t tmpidx; int psize; vm_page_t p, mpte; int objpgs; if (pmap == NULL || object == NULL) return; psize = alpha_btop(size); if ((object->type != OBJT_VNODE) || ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && (object->resident_page_count > MAX_INIT_PT))) { return; } if (psize + pindex > object->size) { if (object->size < pindex) return; psize = object->size - pindex; } mpte = NULL; /* * if we are processing a major portion of the object, then scan the * entire thing. */ if (psize > (object->resident_page_count >> 2)) { objpgs = psize; for (p = TAILQ_FIRST(&object->memq); ((objpgs > 0) && (p != NULL)); p = TAILQ_NEXT(p, listq)) { tmpidx = p->pindex; if (tmpidx < pindex) { continue; } tmpidx -= pindex; if (tmpidx >= psize) { continue; } /* * don't allow an madvise to blow away our really * free pages allocating pv entries. */ if ((limit & MAP_PREFAULT_MADVISE) && cnt.v_free_count < cnt.v_free_reserved) { break; } vm_page_lock_queues(); if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); vm_page_unlock_queues(); mpte = pmap_enter_quick(pmap, addr + alpha_ptob(tmpidx), p, mpte); vm_page_lock_queues(); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } vm_page_unlock_queues(); objpgs -= 1; } } else { /* * else lookup the pages one-by-one. 
*/ for (tmpidx = 0; tmpidx < psize; tmpidx += 1) { /* * don't allow an madvise to blow away our really * free pages allocating pv entries. */ if ((limit & MAP_PREFAULT_MADVISE) && cnt.v_free_count < cnt.v_free_reserved) { break; } p = vm_page_lookup(object, tmpidx + pindex); if (p == NULL) continue; vm_page_lock_queues(); if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); vm_page_unlock_queues(); mpte = pmap_enter_quick(pmap, addr + alpha_ptob(tmpidx), p, mpte); vm_page_lock_queues(); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } vm_page_unlock_queues(); } } return; } /* * pmap_prefault provides a quick way of clustering * pagefaults into a processes address space. It is a "cousin" * of pmap_object_init_pt, except it runs at page fault time instead * of mmap time. */ #define PFBAK 4 #define PFFOR 4 #define PAGEORDER_SIZE (PFBAK+PFFOR) static int pmap_prefault_pageorder[] = { -PAGE_SIZE, PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE, -3 * PAGE_SIZE, 3 * PAGE_SIZE -4 * PAGE_SIZE, 4 * PAGE_SIZE }; void pmap_prefault(pmap, addra, entry) pmap_t pmap; vm_offset_t addra; vm_map_entry_t entry; { int i; vm_offset_t starta; vm_offset_t addr; vm_pindex_t pindex; vm_page_t m, mpte; vm_object_t object; if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) return; object = entry->object.vm_object; starta = addra - PFBAK * PAGE_SIZE; if (starta < entry->start) { starta = entry->start; } else if (starta > addra) { starta = 0; } mpte = NULL; for (i = 0; i < PAGEORDER_SIZE; i++) { vm_object_t lobject; pt_entry_t *pte; addr = addra + pmap_prefault_pageorder[i]; if (addr > addra + (PFFOR * PAGE_SIZE)) addr = 0; if (addr < starta || addr >= entry->end) continue; if (!pmap_pte_v(pmap_lev1pte(pmap, addr)) || !pmap_pte_v(pmap_lev2pte(pmap, addr))) continue; pte = vtopte(addr); if (*pte) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = object; for (m = vm_page_lookup(lobject, pindex); (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object)); lobject = lobject->backing_object) { if (lobject->backing_object_offset & PAGE_MASK) break; pindex += (lobject->backing_object_offset >> PAGE_SHIFT); m = vm_page_lookup(lobject->backing_object, pindex); } /* * give-up when a page is not in memory */ if (m == NULL) break; vm_page_lock_queues(); if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (m->busy == 0) && (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((m->queue - m->pc) == PQ_CACHE) { vm_page_deactivate(m); } vm_page_busy(m); vm_page_unlock_queues(); mpte = pmap_enter_quick(pmap, addr, m, mpte); vm_page_lock_queues(); vm_page_flag_set(m, PG_MAPPED); vm_page_wakeup(m); } vm_page_unlock_queues(); } } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { pt_entry_t *pte; if (pmap == NULL) return; pte = pmap_lev3pte(pmap, va); if (wired && !pmap_pte_w(pte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_pte_w(pte)) pmap->pm_stats.wired_count--; /* * Wiring is not a hardware characteristic so there is no need to * invalidate TLB. */ pmap_pte_set_w(pte, wired); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. 
* * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. This is for the vm_pagezero idle process. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(mdst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } /* * Routine: pmap_pageable * Function: * Make the specified pages (by pmap, offset) * pageable (or not) as requested. * * A page which is not pageable may not take * a fault; therefore, its page table entry * must remain valid for the duration. * * This routine is merely advisory; pmap_enter * will specify that these pages are to be wired * down (or not) as appropriate. */ void pmap_pageable(pmap, sva, eva, pageable) pmap_t pmap; vm_offset_t sva, eva; boolean_t pageable; { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap, m) pmap_t pmap; vm_page_t m; { pv_entry_t pv; int loops = 0; int s; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; s = splvm(); /* * Not found, check current mappings returning immediately if found. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { splx(s); return TRUE; } loops++; if (loops >= 16) break; } splx(s); return (FALSE); } #define PMAP_REMOVE_PAGES_CURPROC_ONLY /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. 
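 *
 * Rather than scanning the page tables, this walks the pmap's own
 * pv list (pm_pvlist), so only addresses that actually have managed
 * mappings are visited; wired mappings are left alone.  With
 * PMAP_REMOVE_PAGES_CURPROC_ONLY defined the pte is found with a
 * plain vtopte(), which is only valid for the currently active
 * address space, hence the warning for non-current pmaps.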
*/ void pmap_remove_pages(pmap, sva, eva) pmap_t pmap; vm_offset_t sva, eva; { pt_entry_t *pte, tpte; vm_page_t m; pv_entry_t pv, npv; int s; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } #endif s = splvm(); for(pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { if (pv->pv_va >= eva || pv->pv_va < sva) { npv = TAILQ_NEXT(pv, pv_plist); continue; } #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY pte = vtopte(pv->pv_va); #else pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); #endif if (!pmap_pte_v(pte)) panic("pmap_remove_pages: page on pm_pvlist has no pte\n"); tpte = *pte; /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { npv = TAILQ_NEXT(pv, pv_plist); continue; } *pte = 0; m = PHYS_TO_VM_PAGE(pmap_pte_pa(&tpte)); pv->pv_pmap->pm_stats.resident_count--; npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); m->md.pv_list_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (TAILQ_FIRST(&m->md.pv_list) == NULL) { vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); } pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); } splx(s); pmap_invalidate_all(pmap); } /* * this routine is used to modify bits in ptes */ static void pmap_changebit(vm_page_t m, int bit, boolean_t setem) { pv_entry_t pv; pt_entry_t *pte; int changed; int s; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; s = splvm(); changed = 0; /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { /* * don't write protect pager mappings */ if (!setem && bit == (PG_UWE|PG_KWE)) { if (!pmap_track_modified(pv->pv_va)) continue; } #if defined(PMAP_DIAGNOSTIC) if (!pv->pv_pmap) { printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va); continue; } #endif pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); changed = 0; if (setem) { *pte |= bit; changed = 1; } else { pt_entry_t pbits = *pte; if (pbits & bit) { changed = 1; *pte = pbits & ~bit; } } if (changed) pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } splx(s); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { pmap_changebit(m, PG_KWE|PG_UWE, FALSE); } else { pmap_remove_all(m); } } } vm_offset_t pmap_phys_address(ppn) int ppn; { return (alpha_ptob(ppn)); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; int count; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return 0; /* * Loop over current mappings looking for any which have don't * have PG_FOR set (i.e. ones where we have taken an emulate * reference trap recently). 
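 * The alpha has no hardware reference bit; a cleared PG_FOR means the
 * fault-on-read trap has fired since the bit was last set, i.e. the
 * page has been referenced.  Setting PG_FOR | PG_FOE again (and
 * invalidating the TLB entry) re-arms the trap so that the next
 * reference is counted as well.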
*/ count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & PG_FOR)) { count++; *pte |= PG_FOR | PG_FOE; pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } } return count; } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; /* * A page is modified if any mapping has had its PG_FOW flag * cleared. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & PG_FOW)) return 1; } return 0; } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; /* * Loop over current mappings setting PG_FOW where needed. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & PG_FOW)) { *pte |= PG_FOW; pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } } } /* * pmap_page_is_free: * * Called when a page is freed to allow pmap to clean up * any extra state associated with the page. In this case * clear modified/referenced bits. */ void pmap_page_is_free(vm_page_t m) { m->md.pv_flags = 0; } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; /* * Loop over current mappings setting PG_FOR and PG_FOE where needed. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & (PG_FOR | PG_FOE))) { *pte |= (PG_FOR | PG_FOE); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } } } /* * pmap_emulate_reference: * * Emulate reference and/or modified bit hits. * From NetBSD */ void pmap_emulate_reference(struct vmspace *vm, vm_offset_t v, int user, int write) { pt_entry_t faultoff, *pte; vm_offset_t pa; int user_addr; /* * Convert process and virtual address to physical address. */ if (v >= VM_MIN_KERNEL_ADDRESS) { if (user) panic("pmap_emulate_reference: user ref to kernel"); pte = vtopte(v); user_addr = 0; } else { KASSERT(vm != NULL, ("pmap_emulate_reference: bad vmspace")); pte = pmap_lev3pte(vm->vm_map.pmap, v); user_addr = 1; } #ifdef DEBUG /* These checks are more expensive */ if (!pmap_pte_v(pte)) panic("pmap_emulate_reference: invalid pte"); #if 0 /* * Can't do these, because cpu_fork and cpu_swapin call * pmap_emulate_reference(), and the bits aren't guaranteed, * for them... */ if (write) { if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE))) panic("pmap_emulate_reference: write but unwritable"); if (!(*pte & PG_FOW)) panic("pmap_emulate_reference: write but not FOW"); } else { if (!(*pte & (user ? PG_URE : PG_URE | PG_KRE))) panic("pmap_emulate_reference: !write but unreadable"); if (!(*pte & (PG_FOR | PG_FOE))) panic("pmap_emulate_reference: !write but not FOR|FOE"); } #endif /* Other diagnostics? */ #endif pa = pmap_pte_pa(pte); KASSERT((*pte & PG_MANAGED) != 0, ("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", curthread, v, user, write, pa)); /* * Twiddle the appropriate bits to reflect the reference * and/or modification.. * * The rules: * (1) always mark page as used, and * (2) if it was a write fault, mark page as modified. 
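 * Since there are no hardware reference/modified bits, "marking" the
 * page consists of clearing the corresponding fault-on bits below:
 * clearing PG_FOR | PG_FOE records the reference, and additionally
 * clearing PG_FOW records the modification.  pmap_ts_referenced() and
 * pmap_is_modified() later infer the state from which bits have been
 * cleared.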
*/ if (write) { faultoff = PG_FOR | PG_FOE | PG_FOW; } else { faultoff = PG_FOR | PG_FOE; } *pte = (*pte & ~faultoff); ALPHA_TBIS(v); } /* * Miscellaneous support routines follow */ static void alpha_protection_init() { int prot, *kp, *up; kp = protection_codes[0]; up = protection_codes[1]; for (prot = 0; prot < 8; prot++) { switch (prot) { case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: *kp++ = PG_ASM; *up++ = 0; break; case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: *kp++ = PG_ASM | PG_KRE; *up++ = PG_URE | PG_KRE; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: *kp++ = PG_ASM | PG_KWE; *up++ = PG_UWE | PG_KWE; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: *kp++ = PG_ASM | PG_KWE | PG_KRE; *up++ = PG_UWE | PG_URE | PG_KWE | PG_KRE; break; } } } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(pa, size) vm_offset_t pa; vm_size_t size; { return (void*) ALPHA_PHYS_TO_K0SEG(pa); } void pmap_unmapdev(va, size) vm_offset_t va; vm_size_t size; { } /* * perform the pmap work for mincore */ int pmap_mincore(pmap, addr) pmap_t pmap; vm_offset_t addr; { pt_entry_t *pte; int val = 0; pte = pmap_lev3pte(pmap, addr); if (pte == 0) { return 0; } if (pmap_pte_v(pte)) { vm_page_t m; vm_offset_t pa; val = MINCORE_INCORE; if ((*pte & PG_MANAGED) == 0) return val; pa = pmap_pte_pa(pte); m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if ((*pte & PG_FOW) == 0) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; /* * Modified by someone */ else if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; /* * Referenced by us */ if ((*pte & (PG_FOR | PG_FOE)) == 0) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; /* * Referenced by someone */ else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } } return val; } void pmap_activate(struct thread *td) { pmap_t pmap; pmap = vmspace_pmap(td->td_proc->p_vmspace); critical_enter(); if (pmap_active[PCPU_GET(cpuid)] && pmap != pmap_active[PCPU_GET(cpuid)]) { atomic_clear_32(&pmap_active[PCPU_GET(cpuid)]->pm_active, PCPU_GET(cpumask)); pmap_active[PCPU_GET(cpuid)] = 0; } td->td_pcb->pcb_hw.apcb_ptbr = ALPHA_K0SEG_TO_PHYS((vm_offset_t) pmap->pm_lev1) >> PAGE_SHIFT; if (pmap->pm_asn[PCPU_GET(cpuid)].gen != PCPU_GET(current_asngen)) pmap_get_asn(pmap); pmap_active[PCPU_GET(cpuid)] = pmap; atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask)); td->td_pcb->pcb_hw.apcb_asn = pmap->pm_asn[PCPU_GET(cpuid)].asn; critical_exit(); if (td == curthread) { alpha_pal_swpctx((u_long)td->td_md.md_pcbpaddr); } } void pmap_deactivate(struct thread *td) { pmap_t pmap; pmap = vmspace_pmap(td->td_proc->p_vmspace); atomic_clear_32(&pmap->pm_active, PCPU_GET(cpumask)); pmap_active[PCPU_GET(cpuid)] = 0; } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { return addr; } #if 0 #if defined(PMAP_DEBUG) pmap_pid_dump(int pid) { pmap_t pmap; struct proc *p; int npte = 0; int index; sx_slock(&allproc_lock); LIST_FOREACH(p, &allproc, p_list) { if (p->p_pid != pid) continue; if (p->p_vmspace) { int i,j; index = 0; pmap = vmspace_pmap(p->p_vmspace); for (i = 0; i < NPDEPG; i++) { 
pd_entry_t *pde; pt_entry_t *pte; vm_offset_t base = i << PDRSHIFT; pde = &pmap->pm_pdir[i]; if (pde && pmap_pde_v(pde)) { for (j = 0; j < NPTEPG; j++) { vm_offset_t va = base + (j << PAGE_SHIFT); if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { if (index) { index = 0; printf("\n"); } sx_sunlock(&allproc_lock); return npte; } pte = pmap_pte_quick(pmap, va); if (pte && pmap_pte_v(pte)) { vm_offset_t pa; vm_page_t m; pa = *(int *)pte; m = PHYS_TO_VM_PAGE(pa); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; if (index >= 2) { index = 0; printf("\n"); } else { printf(" "); } } } } } } } sx_sunlock(&allproc_lock); return npte; } #endif #if defined(DEBUG) static void pads(pmap_t pm); void pmap_pvdump(vm_offset_t pa); /* print address space of pmap*/ static void pads(pm) pmap_t pm; { int i, j; vm_offset_t va; pt_entry_t *ptep; if (pm == kernel_pmap) return; for (i = 0; i < NPDEPG; i++) if (pm->pm_pdir[i]) for (j = 0; j < NPTEPG; j++) { va = (i << PDRSHIFT) + (j << PAGE_SHIFT); if (pm == kernel_pmap && va < KERNBASE) continue; if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) continue; ptep = pmap_pte_quick(pm, va); if (pmap_pte_v(ptep)) printf("%x:%x ", va, *(int *) ptep); }; } void pmap_pvdump(pa) vm_offset_t pa; { pv_entry_t pv; vm_page_t m; printf("pa %x", pa); m = PHYS_TO_VM_PAGE(pa); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); pads(pv->pv_pmap); } printf(" "); } #endif #endif Index: head/sys/powerpc/aim/mmu_oea.c =================================================================== --- head/sys/powerpc/aim/mmu_oea.c (revision 101345) +++ head/sys/powerpc/aim/mmu_oea.c (revision 101346) @@ -1,2276 +1,2271 @@ /* * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Matt Thomas of Allegro Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ */ /* * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #define TODO panic("%s: not implemented", __func__); #define PMAP_LOCK(pm) #define PMAP_UNLOCK(pm) #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) #define TLBSYNC() __asm __volatile("tlbsync"); #define SYNC() __asm __volatile("sync"); #define EIEIO() __asm __volatile("eieio"); #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) #define VSID_TO_SR(vsid) ((vsid) & 0xf) #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ #define PVO_WIRED 0x0010 /* PVO entry is wired */ #define PVO_MANAGED 0x0020 /* PVO entry is managed */ #define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ #define PVO_BOOTSTRAP 0x0080 /* PVO entry allocated during bootstrap */ #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) #define PVO_PTEGIDX_CLR(pvo) \ ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) #define PVO_PTEGIDX_SET(pvo, i) \ ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) #define PMAP_PVO_CHECK(pvo) struct ofw_map { vm_offset_t om_va; vm_size_t om_len; vm_offset_t om_pa; u_int om_mode; }; int pmap_bootstrapped = 0; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; /* * Physical addresses of first and last available physical page. */ vm_offset_t avail_start; vm_offset_t avail_end; /* * Map of physical memory regions. */ vm_offset_t phys_avail[128]; u_int phys_avail_count; static struct mem_region *regions; static struct mem_region *pregions; int regions_sz, pregions_sz; static struct ofw_map *translations; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * Kernel pmap. */ struct pmap kernel_pmap_store; extern struct pmap ofw_pmap; /* * PTEG data. */ static struct pteg *pmap_pteg_table; u_int pmap_pteg_count; u_int pmap_pteg_mask; /* * PVO data. 
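 * Each pvo_entry describes one virtual-to-physical mapping and keeps a
 * cached copy of its pte.  A pvo is linked into two lists at once:
 * pmap_pvo_table[] (hashed by PTEG index, for lookup by address) and
 * the owning page's pvo list (for lookup by physical page), which lets
 * a mapping survive eviction of its pte from the hardware hash table.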
*/ struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ uma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ uma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ struct vm_object pmap_upvo_zone_obj; struct vm_object pmap_mpvo_zone_obj; static vm_object_t pmap_pvo_obj; static u_int pmap_pvo_count; #define BPVO_POOL_SIZE 32768 static struct pvo_entry *pmap_bpvo_pool; static int pmap_bpvo_pool_index = 0; #define VSID_NBPW (sizeof(u_int32_t) * 8) static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; static boolean_t pmap_initialized = FALSE; /* * Statistics. */ u_int pmap_pte_valid = 0; u_int pmap_pte_overflow = 0; u_int pmap_pte_replacements = 0; u_int pmap_pvo_entries = 0; u_int pmap_pvo_enter_calls = 0; u_int pmap_pvo_remove_calls = 0; u_int pmap_pte_spills = 0; SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, &pmap_pte_overflow, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, &pmap_pte_replacements, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, &pmap_pvo_enter_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, &pmap_pvo_remove_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, &pmap_pte_spills, 0, ""); struct pvo_entry *pmap_pvo_zeropage; vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; u_int pmap_rkva_count = 4; /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); /* * PTE calls. */ static int pmap_pte_insert(u_int, struct pte *); /* * PVO calls. */ static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, vm_offset_t, vm_offset_t, u_int, int); static void pmap_pvo_remove(struct pvo_entry *, int); static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); /* * Utility routines. 
*/ static void * pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int); static struct pvo_entry *pmap_rkva_alloc(void); static void pmap_pa_map(struct pvo_entry *, vm_offset_t, struct pte *, int *); static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); static void pmap_syncicache(vm_offset_t, vm_size_t); static boolean_t pmap_query_bit(vm_page_t, int); static boolean_t pmap_clear_bit(vm_page_t, int); static void tlbia(void); static __inline int va_to_sr(u_int *sr, vm_offset_t va) { return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); } static __inline u_int va_to_pteg(u_int sr, vm_offset_t addr) { u_int hash; hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); return (hash & pmap_pteg_mask); } static __inline struct pvo_head * pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pa); if (pg_p != NULL) *pg_p = pg; if (pg == NULL) return (&pmap_pvo_unmanaged); return (&pg->md.mdpg_pvoh); } static __inline struct pvo_head * vm_page_to_pvoh(vm_page_t m) { return (&m->md.mdpg_pvoh); } static __inline void pmap_attr_clear(vm_page_t m, int ptebit) { m->md.mdpg_attrs &= ~ptebit; } static __inline int pmap_attr_fetch(vm_page_t m) { return (m->md.mdpg_attrs); } static __inline void pmap_attr_save(vm_page_t m, int ptebit) { m->md.mdpg_attrs |= ptebit; } static __inline int pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) { if (pt->pte_hi == pvo_pt->pte_hi) return (1); return (0); } static __inline int pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) { return (pt->pte_hi & ~PTE_VALID) == (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | ((va >> ADDR_API_SHFT) & PTE_API) | which); } static __inline void pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) { /* * Construct a PTE. Default to IMB initially. Valid bit only gets * set when the real pte is set in memory. * * Note: Don't set the valid bit for correct operation of tlb update. */ pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); pt->pte_lo = pte_lo; } static __inline void pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); } static __inline void pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) { /* * As shown in Section 7.6.3.2.3 */ pt->pte_lo &= ~ptebit; TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); } static __inline void pmap_pte_set(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_hi |= PTE_VALID; /* * Update the PTE as defined in section 7.6.3.1. * Note that the REF/CHG bits are from pvo_pt and thus should havce * been saved so this routine can restore them (if desired). */ pt->pte_lo = pvo_pt->pte_lo; EIEIO(); pt->pte_hi = pvo_pt->pte_hi; SYNC(); pmap_pte_valid++; } static __inline void pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { pvo_pt->pte_hi &= ~PTE_VALID; /* * Force the reg & chg bits back into the PTEs. */ SYNC(); /* * Invalidate the pte. */ pt->pte_hi &= ~PTE_VALID; SYNC(); TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); /* * Save the reg & chg bits. */ pmap_pte_synch(pt, pvo_pt); pmap_pte_valid--; } static __inline void pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { /* * Invalidate the PTE */ pmap_pte_unset(pt, pvo_pt, va); pmap_pte_set(pt, pvo_pt); } /* * Quick sort callout for comparing memory regions. 
*/ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *regiona; const struct mem_region *regionb; regiona = a; regionb = b; if (regiona->mr_start < regionb->mr_start) return (-1); else if (regiona->mr_start > regionb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *mapa; const struct ofw_map *mapb; mapa = a; mapb = b; if (mapa->om_pa < mapb->om_pa) return (-1); else if (mapa->om_pa > mapb->om_pa) return (1); else return (0); } void pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { ihandle_t mmui; phandle_t chosen, mmu; int sz; int i, j; vm_size_t size, physsz; vm_offset_t pa, va, off; u_int batl, batu; /* * Set up BAT0 to only map the lowest 256 MB area */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); /* * Map PCI memory space. */ battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); /* * Map obio devices. */ battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); /* * Use an IBAT and a DBAT to map the bottom segment of memory * where we are. */ batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x00000000, BAT_M, BAT_PP_RW); __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" :: "r"(batu), "r"(batl)); #if 0 /* map frame buffer */ batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif #if 1 /* map pci space */ batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif /* * Set the start and end of kva. */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); for (i = 0; i < pregions_sz; i++) { CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", pregions[i].mr_start, pregions[i].mr_start + pregions[i].mr_size, pregions[i].mr_size); } if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) panic("pmap_bootstrap: phys_avail too small"); qsort(regions, regions_sz, sizeof(*regions), mr_cmp); phys_avail_count = 0; physsz = 0; for (i = 0, j = 0; i < regions_sz; i++, j += 2) { CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, regions[i].mr_start + regions[i].mr_size, regions[i].mr_size); phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; phys_avail_count++; physsz += regions[i].mr_size; } physmem = btoc(physsz); /* * Allocate PTEG table. 
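 * Unless PTEGCOUNT overrides it, the loop below doubles an initial
 * guess until it reaches physmem (in pages) and then halves it, giving
 * the largest power of two below physmem, with 8 pte slots per group.
 * For example, with 128MB of RAM (physmem == 32768) this settles on
 * 16384 PTEGs, i.e. 131072 pte slots.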
*/ #ifdef PTEGCOUNT pmap_pteg_count = PTEGCOUNT; #else pmap_pteg_count = 0x1000; while (pmap_pteg_count < physmem) pmap_pteg_count <<= 1; pmap_pteg_count >>= 1; #endif /* PTEGCOUNT */ size = pmap_pteg_count * sizeof(struct pteg); CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, size); pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); pmap_pteg_mask = pmap_pteg_count - 1; /* * Allocate pv/overflow lists. */ size = sizeof(struct pvo_head) * pmap_pteg_count; pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, PAGE_SIZE); CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); for (i = 0; i < pmap_pteg_count; i++) LIST_INIT(&pmap_pvo_table[i]); /* * Allocate the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); /* * Initialise the unmanaged pvo pool. */ pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); pmap_bpvo_pool_index = 0; /* * Make sure kernel vsid is allocated as well as VSID 0. */ pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); pmap_vsid_bitmap[0] |= 1; /* * Set up the OpenFirmware pmap and add it's mappings. */ pmap_pinit(&ofw_pmap); ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; if ((chosen = OF_finddevice("/chosen")) == -1) panic("pmap_bootstrap: can't find /chosen"); OF_getprop(chosen, "mmu", &mmui, 4); if ((mmu = OF_instance_to_package(mmui)) == -1) panic("pmap_bootstrap: can't get mmu package"); if ((sz = OF_getproplen(mmu, "translations")) == -1) panic("pmap_bootstrap: can't get ofw translation count"); translations = NULL; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (phys_avail[i + 1] >= sz) translations = (struct ofw_map *)phys_avail[i]; } if (translations == NULL) panic("pmap_bootstrap: no space to copy translations"); bzero(translations, sz); if (OF_getprop(mmu, "translations", translations, sz) == -1) panic("pmap_bootstrap: can't get ofw translations"); CTR0(KTR_PMAP, "pmap_bootstrap: translations"); sz /= sizeof(*translations); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", translations[i].om_pa, translations[i].om_va, translations[i].om_len); /* Drop stuff below something? */ /* Enter the pages? */ for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { struct vm_page m; m.phys_addr = translations[i].om_pa + off; pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, VM_PROT_ALL, 1); } } #ifdef SMP TLBSYNC(); #endif /* * Initialize the kernel pmap (which is statically allocated). */ for (i = 0; i < 16; i++) { kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; } kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; kernel_pmap->pm_active = ~0; /* * Allocate a kernel stack with a guard page for thread0 and map it * into the kernel page map. */ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); kstack0_phys = pa; kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, kstack0); virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; pmap_kenter(va, pa); TLBIE(va); } /* * Calculate the first and last available physical addresses. 
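 * phys_avail[] holds (start, end) pairs of available physical memory,
 * terminated by a zero entry, so avail_start is simply the first start
 * address and avail_end the end address of the last pair.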
*/ avail_start = phys_avail[0]; for (i = 0; phys_avail[i + 2] != 0; i += 2) ; avail_end = phys_avail[i + 1]; Maxmem = powerpc_btop(avail_end); /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize hardware. */ for (i = 0; i < 16; i++) { mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); } __asm __volatile ("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); __asm __volatile ("sync; mtsdr1 %0; isync" :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); tlbia(); pmap_bootstrapped++; } /* * Activate a user pmap. The pmap must be activated before it's address * space can be accessed in any way. */ void pmap_activate(struct thread *td) { pmap_t pm, pmr; /* * Load all the data we need up front to encourasge the compiler to * not issue any loads while we have interrupts disabled below. */ pm = &td->td_proc->p_vmspace->vm_pmap; KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?")); if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) pmr = pm; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(curpmap, pmr); } void pmap_deactivate(struct thread *td) { pmap_t pm; pm = &td->td_proc->p_vmspace->vm_pmap; pm->pm_active &= ~(PCPU_GET(cpumask)); PCPU_SET(curpmap, NULL); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); } void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { if (wired) { if ((pvo->pvo_vaddr & PVO_WIRED) == 0) pm->pm_stats.wired_count++; pvo->pvo_vaddr |= PVO_WIRED; } else { if ((pvo->pvo_vaddr & PVO_WIRED) != 0) pm->pm_stats.wired_count--; pvo->pvo_vaddr &= ~PVO_WIRED; } } } void pmap_clear_modify(vm_page_t m) { if (m->flags * PG_FICTITIOUS) return; pmap_clear_bit(m, PTE_CHG); } void pmap_collect(void) { TODO; } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { /* * This is not needed as it's mainly an optimisation. * It may want to be implemented later though. */ } void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); kcopy((void *)src, (void *)dst, PAGE_SIZE); } /* * Zero a page of physical memory by temporarily mapping it into the tlb. 
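 * Pages below SEGMENT_LENGTH are reachable through the 1:1 BAT mapping
 * of low memory set up in pmap_bootstrap() and can be addressed
 * directly; higher pages are temporarily mapped at a reserved kernel
 * virtual address (pmap_pvo_zeropage) for the duration of the zeroing.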
*/ void pmap_zero_page(vm_page_t m) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, PAGE_SIZE); for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, size); for (i = size / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_idle(vm_page_t m) { /* XXX this is called outside of Giant, is pmap_zero_page safe? */ /* XXX maybe have a dedicated mapping for this to avoid the problem? */ mtx_lock(&Giant); pmap_zero_page(m); mtx_unlock(&Giant); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct pvo_head *pvo_head; uma_zone_t zone; vm_page_t pg; u_int pte_lo, pvo_flags, was_exec, i; int error; if (!pmap_initialized) { pvo_head = &pmap_pvo_kunmanaged; zone = pmap_upvo_zone; pvo_flags = 0; pg = NULL; was_exec = PTE_EXEC; } else { pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg); zone = pmap_mpvo_zone; pvo_flags = PVO_MANAGED; was_exec = 0; } /* * If this is a managed page, and it's the first reference to the page, * clear the execness of the page. Otherwise fetch the execness. */ if (pg != NULL) { if (LIST_EMPTY(pvo_head)) { pmap_attr_clear(pg, PTE_EXEC); } else { was_exec = pmap_attr_fetch(pg) & PTE_EXEC; } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. */ pte_lo = PTE_I | PTE_G; for (i = 0; i < pregions_sz; i++) { if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && (VM_PAGE_TO_PHYS(m) < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(PTE_I | PTE_G); break; } } if (prot & VM_PROT_WRITE) pte_lo |= PTE_BW; else pte_lo |= PTE_BR; pvo_flags |= (prot & VM_PROT_EXECUTE); if (wired) pvo_flags |= PVO_WIRED; error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); /* * Flush the real page from the instruction cache if this page is * mapped executable and cacheable and was not previously mapped (or * was not mapped executable). */ if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0 && was_exec == 0) { /* * Flush the real memory from the cache. 
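 * The PowerPC instruction cache is not kept coherent with stores by
 * hardware, so a page that becomes executable must be explicitly
 * synchronized before instructions are fetched from it.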
*/ pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); if (pg != NULL) pmap_attr_save(pg, PTE_EXEC); } } vm_offset_t pmap_extract(pmap_t pm, vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } return (0); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { CTR0(KTR_PMAP, "pmap_init"); pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16); pmap_pvo_count = 0; pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf); pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf); pmap_initialized = TRUE; } void pmap_init2(void) { CTR0(KTR_PMAP, "pmap_init2"); } boolean_t pmap_is_modified(vm_page_t m) { if (m->flags & PG_FICTITIOUS) return (FALSE); return (pmap_query_bit(m, PTE_CHG)); } void pmap_clear_reference(vm_page_t m) { TODO; } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { TODO; return (0); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { u_int pte_lo; int error; int i; #if 0 if (va < VM_MIN_KERNEL_ADDRESS) panic("pmap_kenter: attempt to enter non-kernel address %#x", va); #endif pte_lo = PTE_I | PTE_G | PTE_BW; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) { pte_lo &= ~(PTE_I | PTE_G); break; } } error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); if (error != 0 && error != ENOENT) panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, pa, error); /* * Flush the real memory from the instruction cache. */ if ((pte_lo & (PTE_I | PTE_G)) == 0) { pmap_syncicache(pa, PAGE_SIZE); } } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); if (pvo == NULL) { return (0); } return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE)); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. 
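 * For example, pmap_map(&va, pa, pa + 3 * PAGE_SIZE, VM_PROT_ALL)
 * enters three pages with pmap_kenter(), returns the virtual address
 * at which the first page was mapped and leaves va pointing just past
 * the mapping.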
*/ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { vm_offset_t sva, va; sva = *virt; va = sva; for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) pmap_kenter(va, pa_start); *virt = va; return (sva); } int pmap_mincore(pmap_t pmap, vm_offset_t addr) { TODO; return (0); } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); /* XXX */ } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { struct pvo_head *pvo_head; struct pvo_entry *pvo, *next_pvo; struct pte *pt; /* * Since the routine only downgrades protection, if the * maximal protection is desired, there isn't any change * to be made. */ if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE)) return; pvo_head = vm_page_to_pvoh(m); for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { next_pvo = LIST_NEXT(pvo, pvo_vlink); PMAP_PVO_CHECK(pvo); /* sanity check */ /* * Downgrading to no mapping at all, we just remove the entry. */ if ((prot & VM_PROT_READ) == 0) { pmap_pvo_remove(pvo, -1); continue; } /* * If EXEC permission is being revoked, just clear the flag * in the PVO. */ if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * If this entry is already RO, don't diddle with the page * table. */ if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { PMAP_PVO_CHECK(pvo); continue; } /* * Grab the PTE before we diddle the bits so pvo_to_pte can * verify the pte contents are as expected. */ pt = pmap_pvo_to_pte(pvo, -1); pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PMAP_PVO_CHECK(pvo); /* sanity check */ } } /* * Make the specified page pageable (or not). Unneeded. */ void pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { TODO; return (0); } static u_int pmap_vsidcontext; void pmap_pinit(pmap_t pmap) { int i, mask; u_int entropy; entropy = 0; __asm __volatile("mftb %0" : "=r"(entropy)); /* * Allocate some segment registers for this pmap. */ for (i = 0; i < NPMAPS; i += VSID_NBPW) { u_int hash, n; /* * Create a new value by mutiplying by a prime and adding in * entropy from the timebase register. This is to make the * VSID more random so that the PT hash function collides * less often. (Note that the prime casues gcc to do shifts * instead of a multiply.) */ pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; hash = pmap_vsidcontext & (NPMAPS - 1); if (hash == 0) /* 0 is special, avoid it */ continue; n = hash >> 5; mask = 1 << (hash & (VSID_NBPW - 1)); hash = (pmap_vsidcontext & 0xfffff); if (pmap_vsid_bitmap[n] & mask) { /* collision? */ /* anything free in this bucket? 
*/ if (pmap_vsid_bitmap[n] == 0xffffffff) { entropy = (pmap_vsidcontext >> 20); continue; } i = ffs(~pmap_vsid_bitmap[i]) - 1; mask = 1 << i; hash &= 0xfffff & ~(VSID_NBPW - 1); hash |= i; } pmap_vsid_bitmap[n] |= mask; for (i = 0; i < 16; i++) pmap->pm_sr[i] = VSID_MAKE(i, hash); return; } panic("pmap_pinit: out of segments"); } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { pmap_pinit(pm); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } void pmap_pinit2(pmap_t pmap) { /* XXX: Remove this stub when no longer called */ } void pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_prefault: non current pmap")); /* XXX */ } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct pvo_entry *pvo; struct pte *pt; int pteidx; CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo == NULL) continue; if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * Grab the PTE pointer before we diddle with the cached PTE * copy. */ pt = pmap_pvo_to_pte(pvo, pteidx); /* * Change the protection of the page. */ pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; /* * If the PVO is in the page table, update that pte as well. */ if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); } } vm_offset_t pmap_phys_address(int ppn) { TODO; return (0); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t va, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kremove(va); } void pmap_release(pmap_t pmap) { TODO; } /* * Remove the given range of addresses from the specified map. */ void pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry *pvo; int pteidx; for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo != NULL) { pmap_pvo_remove(pvo, pteidx); } } } /* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove in the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); pmap_remove(pm, sva, eva); } /* * Create the kernel stack and pcb for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. */ void pmap_new_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; u_int i; /* * Allocate object for the kstack. 
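 * The kva allocation below includes KSTACK_GUARD_PAGES that are left
 * unmapped beneath the stack, so a kernel stack overflow faults
 * instead of silently overwriting adjacent memory; the stack pages
 * themselves are grabbed wired so they are never paged out.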
*/ ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; /* * Get a kernel virtual address for the kstack for this thread. */ ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); if (ks == 0) panic("pmap_new_thread: kstack allocation failed"); TLBIE(ks); ks += KSTACK_GUARD_PAGES * PAGE_SIZE; td->td_kstack = ks; for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page. */ - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); + m = vm_page_grab(ksobj, i, + VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); /* - * Wire the page. - */ - m->wire_count++; - - /* * Enter the page into the kernel address space. */ pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } void pmap_dispose_thread(struct thread *td) { TODO; } void pmap_swapin_thread(struct thread *td) { TODO; } void pmap_swapout_thread(struct thread *td) { TODO; } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size, u_int align) { vm_offset_t s, e; int i, j; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (align != 0) s = (phys_avail[i] + align - 1) & ~(align - 1); else s = phys_avail[i]; e = s + size; if (s < phys_avail[i] || e > phys_avail[i + 1]) continue; if (s == phys_avail[i]) { phys_avail[i] += size; } else if (e == phys_avail[i + 1]) { phys_avail[i + 1] -= size; } else { for (j = phys_avail_count * 2; j > i; j -= 2) { phys_avail[j] = phys_avail[j - 2]; phys_avail[j + 1] = phys_avail[j - 1]; } phys_avail[i + 3] = phys_avail[i + 1]; phys_avail[i + 1] = s; phys_avail[i + 2] = e; phys_avail_count++; } return (s); } panic("pmap_bootstrap_alloc: could not allocate memory"); } /* * Return an unmapped pvo for a kernel virtual address. * Used by pmap functions that operate on physical pages. */ static struct pvo_entry * pmap_rkva_alloc(void) { struct pvo_entry *pvo; struct pte *pt; vm_offset_t kva; int pteidx; if (pmap_rkva_count == 0) panic("pmap_rkva_alloc: no more reserved KVAs"); kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); pmap_kenter(kva, 0); pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); if (pvo == NULL) panic("pmap_kva_alloc: pmap_pvo_find_va failed"); pt = pmap_pvo_to_pte(pvo, pteidx); if (pt == NULL) panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; return (pvo); } static void pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, int *depth_p) { struct pte *pt; /* * If this pvo already has a valid pte, we need to save it so it can * be restored later. We then just reload the new PTE over the old * slot. 
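 * This lets the handful of reserved kernel VAs handed out by
 * pmap_rkva_alloc() be borrowed in a nested fashion: pmap_pa_unmap()
 * later restores the saved pte and checks the recursion depth counted
 * via *depth_p.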
*/ if (saved_pt != NULL) { pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } *saved_pt = pvo->pvo_pte; pvo->pvo_pte.pte_lo &= ~PTE_RPGN; } pvo->pvo_pte.pte_lo |= pa; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_map: could not spill pvo %p", pvo); if (depth_p != NULL) (*depth_p)++; } static void pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) { struct pte *pt; pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } pvo->pvo_pte.pte_lo &= ~PTE_RPGN; /* * If there is a saved PTE and it's valid, restore it and return. */ if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { if (depth_p != NULL && --(*depth_p) == 0) panic("pmap_pa_unmap: restoring but depth == 0"); pvo->pvo_pte = *saved_pt; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_unmap: could not spill pvo %p", pvo); } } static void pmap_syncicache(vm_offset_t pa, vm_size_t len) { __syncicache((void *)pa, len); } static void tlbia(void) { caddr_t i; SYNC(); for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { TLBIE(i); EIEIO(); } TLBSYNC(); SYNC(); } static int pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) { struct pvo_entry *pvo; u_int sr; int first; u_int ptegidx; int i; pmap_pvo_enter_calls++; first = 0; /* * Compute the PTE Group index. */ va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); /* * Remove any existing mapping for this page. Reuse the pvo entry if * there is a mapping. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && (pvo->pvo_pte.pte_lo & PTE_PP) == (pte_lo & PTE_PP)) { return (0); } pmap_pvo_remove(pvo, -1); break; } } /* * If we aren't overwriting a mapping, try to allocate. */ if (pmap_initialized) { pvo = uma_zalloc(zone, M_NOWAIT); } else { if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", pmap_bpvo_pool_index, BPVO_POOL_SIZE, BPVO_POOL_SIZE * sizeof(struct pvo_entry)); } pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; pmap_bpvo_pool_index++; pvo->pvo_vaddr |= PVO_BOOTSTRAP; } if (pvo == NULL) { return (ENOMEM); } pmap_pvo_entries++; pvo->pvo_vaddr = va; pvo->pvo_pmap = pm; LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); pvo->pvo_vaddr &= ~ADDR_POFF; if (flags & VM_PROT_EXECUTE) pvo->pvo_vaddr |= PVO_EXECUTABLE; if (flags & PVO_WIRED) pvo->pvo_vaddr |= PVO_WIRED; if (pvo_head != &pmap_pvo_kunmanaged) pvo->pvo_vaddr |= PVO_MANAGED; pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); /* * Remember if the list was empty and therefore will be the first * item. */ if (LIST_FIRST(pvo_head) == NULL) first = 1; LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count++; pvo->pvo_pmap->pm_stats.resident_count++; /* * We hope this succeeds but it isn't required. */ i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (i >= 0) { PVO_PTEGIDX_SET(pvo, i); } else { panic("pmap_pvo_enter: overflow"); pmap_pte_overflow++; } return (first ? ENOENT : 0); } static void pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If there is an active pte entry, we need to deactivate it (and * save the ref & cfg bits). 
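 * pmap_pte_unset() copies the REF/CHG bits back into the cached
 * pvo_pte, so they can be folded into the page's attribute cache
 * (pmap_attr_save) further down before the pvo is freed.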
*/ pt = pmap_pvo_to_pte(pvo, pteidx); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); } else { pmap_pte_overflow--; } /* * Update our statistics. */ pvo->pvo_pmap->pm_stats.resident_count--; if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count--; /* * Save the REF/CHG bits into their cache if the page is managed. */ if (pvo->pvo_vaddr & PVO_MANAGED) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); if (pg != NULL) { pmap_attr_save(pg, pvo->pvo_pte.pte_lo & (PTE_REF | PTE_CHG)); } } /* * Remove this PVO from the PV list. */ LIST_REMOVE(pvo, pvo_vlink); /* * Remove this from the overflow list and return it to the pool * if we aren't going to reuse it. */ LIST_REMOVE(pvo, pvo_olink); if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone, pvo); pmap_pvo_entries--; pmap_pvo_remove_calls++; } static __inline int pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) { int pteidx; /* * We can find the actual pte entry without searching by grabbing * the PTEG index from 3 unused bits in pte_lo[11:9] and by * noticing the HID bit. */ pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); if (pvo->pvo_pte.pte_hi & PTE_HID) pteidx ^= pmap_pteg_mask * 8; return (pteidx); } static struct pvo_entry * pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) { struct pvo_entry *pvo; int ptegidx; u_int sr; va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if (pteidx_p) *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); return (pvo); } } return (NULL); } static struct pte * pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If we haven't been supplied the ptegidx, calculate it. */ if (pteidx == -1) { int ptegidx; u_int sr; sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); pteidx = pmap_pvo_pte_index(pvo, ptegidx); } pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " "valid pte index", pvo); } if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " "pvo but no valid pte", pvo); } if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { panic("pmap_pvo_to_pte: pvo %p has valid pte in " "pmap_pteg_table %p but invalid in pvo", pvo, pt); } if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { panic("pmap_pvo_to_pte: pvo %p pte does not match " "pte %p in pmap_pteg_table", pvo, pt); } return (pt); } if (pvo->pvo_pte.pte_hi & PTE_VALID) { panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " "pmap_pteg_table but valid in pvo", pvo, pt); } return (NULL); } static void * pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { vm_page_t m; if (bytes != PAGE_SIZE) panic("pmap_pvo_allocf: benno was shortsighted. hit him."); *flags = UMA_SLAB_PRIV; m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM); if (m == NULL) return (NULL); pmap_pvo_count++; return ((void *)VM_PAGE_TO_PHYS(m)); } /* * XXX: THIS STUFF SHOULD BE IN pte.c? 
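 * pmap_pte_spill() handles the case where a mapping exists as a pvo
 * but its pte is not currently resident in the hash table: it tries to
 * re-insert the pte into a free slot first and, failing that, evicts
 * the occupant of a pseudo-randomly chosen slot (low bits of the
 * timebase) in the primary PTEG, preserving the victim's REF/CHG bits
 * in its pvo, and installs the faulting mapping there.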
*/ int pmap_pte_spill(vm_offset_t addr) { struct pvo_entry *source_pvo, *victim_pvo; struct pvo_entry *pvo; int ptegidx, i, j; u_int sr; struct pteg *pteg; struct pte *pt; pmap_pte_spills++; sr = mfsrin(addr); ptegidx = va_to_pteg(sr, addr); /* * Have to substitute some entry. Use the primary hash for this. * Use low bits of timebase as random generator. */ pteg = &pmap_pteg_table[ptegidx]; __asm __volatile("mftb %0" : "=r"(i)); i &= 7; pt = &pteg->pt[i]; source_pvo = NULL; victim_pvo = NULL; LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { /* * We need to find a pvo entry for this address. */ PMAP_PVO_CHECK(pvo); if (source_pvo == NULL && pmap_pte_match(&pvo->pvo_pte, sr, addr, pvo->pvo_pte.pte_hi & PTE_HID)) { /* * Now found an entry to be spilled into the pteg. * The PTE is now valid, so we know it's active. */ j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (j >= 0) { PVO_PTEGIDX_SET(pvo, j); pmap_pte_overflow--; PMAP_PVO_CHECK(pvo); return (1); } source_pvo = pvo; if (victim_pvo != NULL) break; } /* * We also need the pvo entry of the victim we are replacing * so save the R & C bits of the PTE. */ if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; if (source_pvo != NULL) break; } } if (source_pvo == NULL) return (0); if (victim_pvo == NULL) { if ((pt->pte_hi & PTE_HID) == 0) panic("pmap_pte_spill: victim p-pte (%p) has no pvo" "entry", pt); /* * If this is a secondary PTE, we need to search it's primary * pvo bucket for the matching PVO. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], pvo_olink) { PMAP_PVO_CHECK(pvo); /* * We also need the pvo entry of the victim we are * replacing so save the R & C bits of the PTE. */ if (pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; break; } } if (victim_pvo == NULL) panic("pmap_pte_spill: victim s-pte (%p) has no pvo" "entry", pt); } /* * We are invalidating the TLB entry for the EA we are replacing even * though it's valid. If we don't, we lose any ref/chg bit changes * contained in the TLB entry. */ source_pvo->pvo_pte.pte_hi &= ~PTE_HID; pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); pmap_pte_set(pt, &source_pvo->pvo_pte); PVO_PTEGIDX_CLR(victim_pvo); PVO_PTEGIDX_SET(source_pvo, i); pmap_pte_replacements++; PMAP_PVO_CHECK(victim_pvo); PMAP_PVO_CHECK(source_pvo); return (1); } static int pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) { struct pte *pt; int i; /* * First try primary hash. */ for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi &= ~PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } /* * Now try secondary hash. */ ptegidx ^= pmap_pteg_mask; ptegidx++; for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi |= PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } panic("pmap_pte_insert: overflow"); return (-1); } static boolean_t pmap_query_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; if (pmap_attr_fetch(m) & ptebit) return (TRUE); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if we saved the bit off. If so, cache it and return * success. */ if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } /* * No luck, now go through the hard part of looking at the PTEs * themselves. Sync so that any pending REF/CHG bits are flushed to * the PTEs. 
*/ SYNC(); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if this pvo has a valid PTE. if so, fetch the * REF/CHG bits from the valid PTE. If the appropriate * ptebit is set, cache it and return success. */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } } return (TRUE); } static boolean_t pmap_clear_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; int rv; /* * Clear the cached value. */ rv = pmap_attr_fetch(m); pmap_attr_clear(m, ptebit); /* * Sync so that any pending REF/CHG bits are flushed to the PTEs (so * we can reset the right ones). note that since the pvo entries and * list heads are accessed via BAT0 and are never placed in the page * table, we don't have to worry about further accesses setting the * REF/CHG bits. */ SYNC(); /* * For each pvo entry, clear the pvo's ptebit. If this pvo has a * valid pte clear the ptebit from the valid pte. */ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); } rv |= pvo->pvo_pte.pte_lo; pvo->pvo_pte.pte_lo &= ~ptebit; PMAP_PVO_CHECK(pvo); /* sanity check */ } return ((rv & ptebit) != 0); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, tmpva, offset; pa = trunc_page(pa); offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); GIANT_REQUIRED; va = kmem_alloc_pageable(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { pmap_kenter(tmpva, pa); TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } return ((void *)(va + offset)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { vm_offset_t base, offset; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); kmem_free(kernel_map, base, size); } Index: head/sys/powerpc/powerpc/mmu_oea.c =================================================================== --- head/sys/powerpc/powerpc/mmu_oea.c (revision 101345) +++ head/sys/powerpc/powerpc/mmu_oea.c (revision 101346) @@ -1,2276 +1,2271 @@ /* * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Matt Thomas of Allegro Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. 
* 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ */ /* * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #define TODO panic("%s: not implemented", __func__); #define PMAP_LOCK(pm) #define PMAP_UNLOCK(pm) #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) #define TLBSYNC() __asm __volatile("tlbsync"); #define SYNC() __asm __volatile("sync"); #define EIEIO() __asm __volatile("eieio"); #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) #define VSID_TO_SR(vsid) ((vsid) & 0xf) #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ #define PVO_WIRED 0x0010 /* PVO entry is wired */ #define PVO_MANAGED 0x0020 /* PVO entry is managed */ #define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ #define PVO_BOOTSTRAP 0x0080 /* PVO entry allocated during bootstrap */ #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) #define PVO_PTEGIDX_CLR(pvo) \ ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) #define PVO_PTEGIDX_SET(pvo, i) \ ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) #define PMAP_PVO_CHECK(pvo) struct ofw_map { vm_offset_t om_va; vm_size_t om_len; vm_offset_t om_pa; u_int om_mode; }; int pmap_bootstrapped = 0; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; /* * Physical addresses of first and last available physical page. */ vm_offset_t avail_start; vm_offset_t avail_end; /* * Map of physical memory regions. 
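 * phys_avail[] holds (start, end) pairs of page-aligned physical
 * addresses, terminated by zeroed slots.  A minimal sketch of how a
 * consumer walks it (illustrative only, locals invented):
 *
 *	int i;
 *	vm_size_t total = 0;
 *
 *	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *		total += phys_avail[i + 1] - phys_avail[i];
 *
 * pmap_bootstrap() fills the array from the firmware memory regions and
 * pmap_bootstrap_alloc() carves early allocations back out of it.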
*/ vm_offset_t phys_avail[128]; u_int phys_avail_count; static struct mem_region *regions; static struct mem_region *pregions; int regions_sz, pregions_sz; static struct ofw_map *translations; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * Kernel pmap. */ struct pmap kernel_pmap_store; extern struct pmap ofw_pmap; /* * PTEG data. */ static struct pteg *pmap_pteg_table; u_int pmap_pteg_count; u_int pmap_pteg_mask; /* * PVO data. */ struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ uma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ uma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ struct vm_object pmap_upvo_zone_obj; struct vm_object pmap_mpvo_zone_obj; static vm_object_t pmap_pvo_obj; static u_int pmap_pvo_count; #define BPVO_POOL_SIZE 32768 static struct pvo_entry *pmap_bpvo_pool; static int pmap_bpvo_pool_index = 0; #define VSID_NBPW (sizeof(u_int32_t) * 8) static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; static boolean_t pmap_initialized = FALSE; /* * Statistics. */ u_int pmap_pte_valid = 0; u_int pmap_pte_overflow = 0; u_int pmap_pte_replacements = 0; u_int pmap_pvo_entries = 0; u_int pmap_pvo_enter_calls = 0; u_int pmap_pvo_remove_calls = 0; u_int pmap_pte_spills = 0; SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, &pmap_pte_overflow, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, &pmap_pte_replacements, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, &pmap_pvo_enter_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, &pmap_pvo_remove_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, &pmap_pte_spills, 0, ""); struct pvo_entry *pmap_pvo_zeropage; vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; u_int pmap_rkva_count = 4; /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); /* * PTE calls. */ static int pmap_pte_insert(u_int, struct pte *); /* * PVO calls. */ static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, vm_offset_t, vm_offset_t, u_int, int); static void pmap_pvo_remove(struct pvo_entry *, int); static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); /* * Utility routines. 
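 * The inline helpers that follow implement the classic OEA hashed page
 * table lookup: va_to_sr() selects the segment register from the top
 * four bits of the virtual address, and va_to_pteg() hashes the VSID
 * against the virtual page index to pick a PTE group.  Condensed (the
 * same computation the helpers perform, sr/va as in the code):
 *
 *	hash = (sr & SR_VSID_MASK) ^ ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
 *	ptegidx = hash & pmap_pteg_mask;
 *
 * Each group holds eight PTEs; when the primary group fills up,
 * pmap_pte_insert() retries in the secondary group with PTE_HID set.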
*/ static void * pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int); static struct pvo_entry *pmap_rkva_alloc(void); static void pmap_pa_map(struct pvo_entry *, vm_offset_t, struct pte *, int *); static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); static void pmap_syncicache(vm_offset_t, vm_size_t); static boolean_t pmap_query_bit(vm_page_t, int); static boolean_t pmap_clear_bit(vm_page_t, int); static void tlbia(void); static __inline int va_to_sr(u_int *sr, vm_offset_t va) { return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); } static __inline u_int va_to_pteg(u_int sr, vm_offset_t addr) { u_int hash; hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); return (hash & pmap_pteg_mask); } static __inline struct pvo_head * pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pa); if (pg_p != NULL) *pg_p = pg; if (pg == NULL) return (&pmap_pvo_unmanaged); return (&pg->md.mdpg_pvoh); } static __inline struct pvo_head * vm_page_to_pvoh(vm_page_t m) { return (&m->md.mdpg_pvoh); } static __inline void pmap_attr_clear(vm_page_t m, int ptebit) { m->md.mdpg_attrs &= ~ptebit; } static __inline int pmap_attr_fetch(vm_page_t m) { return (m->md.mdpg_attrs); } static __inline void pmap_attr_save(vm_page_t m, int ptebit) { m->md.mdpg_attrs |= ptebit; } static __inline int pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) { if (pt->pte_hi == pvo_pt->pte_hi) return (1); return (0); } static __inline int pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) { return (pt->pte_hi & ~PTE_VALID) == (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | ((va >> ADDR_API_SHFT) & PTE_API) | which); } static __inline void pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) { /* * Construct a PTE. Default to IMB initially. Valid bit only gets * set when the real pte is set in memory. * * Note: Don't set the valid bit for correct operation of tlb update. */ pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); pt->pte_lo = pte_lo; } static __inline void pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); } static __inline void pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) { /* * As shown in Section 7.6.3.2.3 */ pt->pte_lo &= ~ptebit; TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); } static __inline void pmap_pte_set(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_hi |= PTE_VALID; /* * Update the PTE as defined in section 7.6.3.1. * Note that the REF/CHG bits are from pvo_pt and thus should havce * been saved so this routine can restore them (if desired). */ pt->pte_lo = pvo_pt->pte_lo; EIEIO(); pt->pte_hi = pvo_pt->pte_hi; SYNC(); pmap_pte_valid++; } static __inline void pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { pvo_pt->pte_hi &= ~PTE_VALID; /* * Force the reg & chg bits back into the PTEs. */ SYNC(); /* * Invalidate the pte. */ pt->pte_hi &= ~PTE_VALID; SYNC(); TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); /* * Save the reg & chg bits. */ pmap_pte_synch(pt, pvo_pt); pmap_pte_valid--; } static __inline void pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { /* * Invalidate the PTE */ pmap_pte_unset(pt, pvo_pt, va); pmap_pte_set(pt, pvo_pt); } /* * Quick sort callout for comparing memory regions. 
*/ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *regiona; const struct mem_region *regionb; regiona = a; regionb = b; if (regiona->mr_start < regionb->mr_start) return (-1); else if (regiona->mr_start > regionb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *mapa; const struct ofw_map *mapb; mapa = a; mapb = b; if (mapa->om_pa < mapb->om_pa) return (-1); else if (mapa->om_pa > mapb->om_pa) return (1); else return (0); } void pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { ihandle_t mmui; phandle_t chosen, mmu; int sz; int i, j; vm_size_t size, physsz; vm_offset_t pa, va, off; u_int batl, batu; /* * Set up BAT0 to only map the lowest 256 MB area */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); /* * Map PCI memory space. */ battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); /* * Map obio devices. */ battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); /* * Use an IBAT and a DBAT to map the bottom segment of memory * where we are. */ batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x00000000, BAT_M, BAT_PP_RW); __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" :: "r"(batu), "r"(batl)); #if 0 /* map frame buffer */ batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif #if 1 /* map pci space */ batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif /* * Set the start and end of kva. */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); for (i = 0; i < pregions_sz; i++) { CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", pregions[i].mr_start, pregions[i].mr_start + pregions[i].mr_size, pregions[i].mr_size); } if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) panic("pmap_bootstrap: phys_avail too small"); qsort(regions, regions_sz, sizeof(*regions), mr_cmp); phys_avail_count = 0; physsz = 0; for (i = 0, j = 0; i < regions_sz; i++, j += 2) { CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, regions[i].mr_start + regions[i].mr_size, regions[i].mr_size); phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; phys_avail_count++; physsz += regions[i].mr_size; } physmem = btoc(physsz); /* * Allocate PTEG table. 
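 * Sizing note: the group count starts at 0x1000 and doubles while it is
 * below physmem (in pages), then is halved, which yields a power of two
 * of at least physmem / 2 unless PTEGCOUNT overrides it.  As a worked
 * example, 128MB of RAM gives physmem = 32768 4KB pages, the loop stops
 * at 32768, the shift brings it back to 16384 groups, and with eight
 * 8-byte PTEs per group the table below comes out at 1MB.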
*/ #ifdef PTEGCOUNT pmap_pteg_count = PTEGCOUNT; #else pmap_pteg_count = 0x1000; while (pmap_pteg_count < physmem) pmap_pteg_count <<= 1; pmap_pteg_count >>= 1; #endif /* PTEGCOUNT */ size = pmap_pteg_count * sizeof(struct pteg); CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, size); pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); pmap_pteg_mask = pmap_pteg_count - 1; /* * Allocate pv/overflow lists. */ size = sizeof(struct pvo_head) * pmap_pteg_count; pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, PAGE_SIZE); CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); for (i = 0; i < pmap_pteg_count; i++) LIST_INIT(&pmap_pvo_table[i]); /* * Allocate the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); /* * Initialise the unmanaged pvo pool. */ pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); pmap_bpvo_pool_index = 0; /* * Make sure kernel vsid is allocated as well as VSID 0. */ pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); pmap_vsid_bitmap[0] |= 1; /* * Set up the OpenFirmware pmap and add it's mappings. */ pmap_pinit(&ofw_pmap); ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; if ((chosen = OF_finddevice("/chosen")) == -1) panic("pmap_bootstrap: can't find /chosen"); OF_getprop(chosen, "mmu", &mmui, 4); if ((mmu = OF_instance_to_package(mmui)) == -1) panic("pmap_bootstrap: can't get mmu package"); if ((sz = OF_getproplen(mmu, "translations")) == -1) panic("pmap_bootstrap: can't get ofw translation count"); translations = NULL; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (phys_avail[i + 1] >= sz) translations = (struct ofw_map *)phys_avail[i]; } if (translations == NULL) panic("pmap_bootstrap: no space to copy translations"); bzero(translations, sz); if (OF_getprop(mmu, "translations", translations, sz) == -1) panic("pmap_bootstrap: can't get ofw translations"); CTR0(KTR_PMAP, "pmap_bootstrap: translations"); sz /= sizeof(*translations); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", translations[i].om_pa, translations[i].om_va, translations[i].om_len); /* Drop stuff below something? */ /* Enter the pages? */ for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { struct vm_page m; m.phys_addr = translations[i].om_pa + off; pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, VM_PROT_ALL, 1); } } #ifdef SMP TLBSYNC(); #endif /* * Initialize the kernel pmap (which is statically allocated). */ for (i = 0; i < 16; i++) { kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; } kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; kernel_pmap->pm_active = ~0; /* * Allocate a kernel stack with a guard page for thread0 and map it * into the kernel page map. */ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); kstack0_phys = pa; kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, kstack0); virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; pmap_kenter(va, pa); TLBIE(va); } /* * Calculate the first and last available physical addresses. 
*/ avail_start = phys_avail[0]; for (i = 0; phys_avail[i + 2] != 0; i += 2) ; avail_end = phys_avail[i + 1]; Maxmem = powerpc_btop(avail_end); /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize hardware. */ for (i = 0; i < 16; i++) { mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); } __asm __volatile ("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); __asm __volatile ("sync; mtsdr1 %0; isync" :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); tlbia(); pmap_bootstrapped++; } /* * Activate a user pmap. The pmap must be activated before it's address * space can be accessed in any way. */ void pmap_activate(struct thread *td) { pmap_t pm, pmr; /* * Load all the data we need up front to encourasge the compiler to * not issue any loads while we have interrupts disabled below. */ pm = &td->td_proc->p_vmspace->vm_pmap; KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?")); if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) pmr = pm; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(curpmap, pmr); } void pmap_deactivate(struct thread *td) { pmap_t pm; pm = &td->td_proc->p_vmspace->vm_pmap; pm->pm_active &= ~(PCPU_GET(cpumask)); PCPU_SET(curpmap, NULL); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); } void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { if (wired) { if ((pvo->pvo_vaddr & PVO_WIRED) == 0) pm->pm_stats.wired_count++; pvo->pvo_vaddr |= PVO_WIRED; } else { if ((pvo->pvo_vaddr & PVO_WIRED) != 0) pm->pm_stats.wired_count--; pvo->pvo_vaddr &= ~PVO_WIRED; } } } void pmap_clear_modify(vm_page_t m) { if (m->flags * PG_FICTITIOUS) return; pmap_clear_bit(m, PTE_CHG); } void pmap_collect(void) { TODO; } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { /* * This is not needed as it's mainly an optimisation. * It may want to be implemented later though. */ } void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); kcopy((void *)src, (void *)dst, PAGE_SIZE); } /* * Zero a page of physical memory by temporarily mapping it into the tlb. 
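 * Physical pages below SEGMENT_LENGTH can be touched directly through
 * the 1:1 BAT0 mapping set up in pmap_bootstrap(); higher pages are
 * temporarily borrowed into the reserved KVA behind pmap_pvo_zeropage
 * via pmap_pa_map().  After the bzero() the page is swept with dcbz one
 * cache block at a time, e.g. 4KB pages and 32-byte lines give
 * PAGE_SIZE / CACHELINESIZE = 128 iterations (the line size here is
 * only an example value).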
*/ void pmap_zero_page(vm_page_t m) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, PAGE_SIZE); for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, size); for (i = size / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_idle(vm_page_t m) { /* XXX this is called outside of Giant, is pmap_zero_page safe? */ /* XXX maybe have a dedicated mapping for this to avoid the problem? */ mtx_lock(&Giant); pmap_zero_page(m); mtx_unlock(&Giant); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct pvo_head *pvo_head; uma_zone_t zone; vm_page_t pg; u_int pte_lo, pvo_flags, was_exec, i; int error; if (!pmap_initialized) { pvo_head = &pmap_pvo_kunmanaged; zone = pmap_upvo_zone; pvo_flags = 0; pg = NULL; was_exec = PTE_EXEC; } else { pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg); zone = pmap_mpvo_zone; pvo_flags = PVO_MANAGED; was_exec = 0; } /* * If this is a managed page, and it's the first reference to the page, * clear the execness of the page. Otherwise fetch the execness. */ if (pg != NULL) { if (LIST_EMPTY(pvo_head)) { pmap_attr_clear(pg, PTE_EXEC); } else { was_exec = pmap_attr_fetch(pg) & PTE_EXEC; } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. */ pte_lo = PTE_I | PTE_G; for (i = 0; i < pregions_sz; i++) { if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && (VM_PAGE_TO_PHYS(m) < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(PTE_I | PTE_G); break; } } if (prot & VM_PROT_WRITE) pte_lo |= PTE_BW; else pte_lo |= PTE_BR; pvo_flags |= (prot & VM_PROT_EXECUTE); if (wired) pvo_flags |= PVO_WIRED; error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); /* * Flush the real page from the instruction cache if this page is * mapped executable and cacheable and was not previously mapped (or * was not mapped executable). */ if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0 && was_exec == 0) { /* * Flush the real memory from the cache. 
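 * Background for the call below: the instruction cache is not snooped,
 * so a page that has just become executable and cacheable must have its
 * contents flushed from the data cache and invalidated from the icache
 * before anything jumps into it.  pmap_syncicache() forwards to
 * __syncicache(), which performs the usual dcbst/sync/icbi/isync walk
 * over the range.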
*/ pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); if (pg != NULL) pmap_attr_save(pg, PTE_EXEC); } } vm_offset_t pmap_extract(pmap_t pm, vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } return (0); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { CTR0(KTR_PMAP, "pmap_init"); pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16); pmap_pvo_count = 0; pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf); pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf); pmap_initialized = TRUE; } void pmap_init2(void) { CTR0(KTR_PMAP, "pmap_init2"); } boolean_t pmap_is_modified(vm_page_t m) { if (m->flags & PG_FICTITIOUS) return (FALSE); return (pmap_query_bit(m, PTE_CHG)); } void pmap_clear_reference(vm_page_t m) { TODO; } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { TODO; return (0); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { u_int pte_lo; int error; int i; #if 0 if (va < VM_MIN_KERNEL_ADDRESS) panic("pmap_kenter: attempt to enter non-kernel address %#x", va); #endif pte_lo = PTE_I | PTE_G | PTE_BW; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) { pte_lo &= ~(PTE_I | PTE_G); break; } } error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); if (error != 0 && error != ENOENT) panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, pa, error); /* * Flush the real memory from the instruction cache. */ if ((pte_lo & (PTE_I | PTE_G)) == 0) { pmap_syncicache(pa, PAGE_SIZE); } } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); if (pvo == NULL) { return (0); } return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE)); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. 
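 * A minimal usage sketch (hypothetical caller, addresses invented):
 *
 *	vm_offset_t va, sva;
 *
 *	va = virtual_avail;
 *	sva = pmap_map(&va, 0x01000000, 0x01100000,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 *
 * sva is the start of the new 1MB mapping and va has been advanced past
 * its end.  The prot argument is currently ignored: every page goes in
 * through pmap_kenter() and is therefore wired and writable.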
*/ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { vm_offset_t sva, va; sva = *virt; va = sva; for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) pmap_kenter(va, pa_start); *virt = va; return (sva); } int pmap_mincore(pmap_t pmap, vm_offset_t addr) { TODO; return (0); } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); /* XXX */ } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { struct pvo_head *pvo_head; struct pvo_entry *pvo, *next_pvo; struct pte *pt; /* * Since the routine only downgrades protection, if the * maximal protection is desired, there isn't any change * to be made. */ if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE)) return; pvo_head = vm_page_to_pvoh(m); for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { next_pvo = LIST_NEXT(pvo, pvo_vlink); PMAP_PVO_CHECK(pvo); /* sanity check */ /* * Downgrading to no mapping at all, we just remove the entry. */ if ((prot & VM_PROT_READ) == 0) { pmap_pvo_remove(pvo, -1); continue; } /* * If EXEC permission is being revoked, just clear the flag * in the PVO. */ if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * If this entry is already RO, don't diddle with the page * table. */ if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { PMAP_PVO_CHECK(pvo); continue; } /* * Grab the PTE before we diddle the bits so pvo_to_pte can * verify the pte contents are as expected. */ pt = pmap_pvo_to_pte(pvo, -1); pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PMAP_PVO_CHECK(pvo); /* sanity check */ } } /* * Make the specified page pageable (or not). Unneeded. */ void pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { TODO; return (0); } static u_int pmap_vsidcontext; void pmap_pinit(pmap_t pmap) { int i, mask; u_int entropy; entropy = 0; __asm __volatile("mftb %0" : "=r"(entropy)); /* * Allocate some segment registers for this pmap. */ for (i = 0; i < NPMAPS; i += VSID_NBPW) { u_int hash, n; /* * Create a new value by mutiplying by a prime and adding in * entropy from the timebase register. This is to make the * VSID more random so that the PT hash function collides * less often. (Note that the prime casues gcc to do shifts * instead of a multiply.) */ pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; hash = pmap_vsidcontext & (NPMAPS - 1); if (hash == 0) /* 0 is special, avoid it */ continue; n = hash >> 5; mask = 1 << (hash & (VSID_NBPW - 1)); hash = (pmap_vsidcontext & 0xfffff); if (pmap_vsid_bitmap[n] & mask) { /* collision? */ /* anything free in this bucket? 
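 * The VSID space is tracked in pmap_vsid_bitmap[], one bit per VSID and
 * VSID_NBPW (32) bits per word.  On a collision the intent is to fall
 * back to any free bit in the same word n, keeping the high hash bits
 * and substituting the free slot index for the low bits, roughly:
 *
 *	i = ffs(~pmap_vsid_bitmap[n]) - 1;
 *	hash = (hash & 0xfffff & ~(VSID_NBPW - 1)) | i;
 *
 * Only a word with no clear bits at all forces another pass of the
 * outer loop with fresh timebase entropy.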
*/ if (pmap_vsid_bitmap[n] == 0xffffffff) { entropy = (pmap_vsidcontext >> 20); continue; } i = ffs(~pmap_vsid_bitmap[i]) - 1; mask = 1 << i; hash &= 0xfffff & ~(VSID_NBPW - 1); hash |= i; } pmap_vsid_bitmap[n] |= mask; for (i = 0; i < 16; i++) pmap->pm_sr[i] = VSID_MAKE(i, hash); return; } panic("pmap_pinit: out of segments"); } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { pmap_pinit(pm); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } void pmap_pinit2(pmap_t pmap) { /* XXX: Remove this stub when no longer called */ } void pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_prefault: non current pmap")); /* XXX */ } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct pvo_entry *pvo; struct pte *pt; int pteidx; CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo == NULL) continue; if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * Grab the PTE pointer before we diddle with the cached PTE * copy. */ pt = pmap_pvo_to_pte(pvo, pteidx); /* * Change the protection of the page. */ pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; /* * If the PVO is in the page table, update that pte as well. */ if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); } } vm_offset_t pmap_phys_address(int ppn) { TODO; return (0); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t va, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kremove(va); } void pmap_release(pmap_t pmap) { TODO; } /* * Remove the given range of addresses from the specified map. */ void pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry *pvo; int pteidx; for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo != NULL) { pmap_pvo_remove(pvo, pteidx); } } } /* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove in the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); pmap_remove(pm, sva, eva); } /* * Create the kernel stack and pcb for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. */ void pmap_new_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; u_int i; /* * Allocate object for the kstack. 
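 * The stack pages themselves come next: KSTACK_PAGES pages are grabbed
 * from this object and entered into the kernel pmap at a KVA that
 * leaves KSTACK_GUARD_PAGES unmapped below the stack to catch
 * overflows.  With this change vm_page_grab() is asked for
 * VM_ALLOC_WIRED, so the pages come back already wired instead of
 * having wire_count bumped by hand.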
*/ ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; /* * Get a kernel virtual address for the kstack for this thread. */ ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); if (ks == 0) panic("pmap_new_thread: kstack allocation failed"); TLBIE(ks); ks += KSTACK_GUARD_PAGES * PAGE_SIZE; td->td_kstack = ks; for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page. */ - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); + m = vm_page_grab(ksobj, i, + VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); /* - * Wire the page. - */ - m->wire_count++; - - /* * Enter the page into the kernel address space. */ pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } void pmap_dispose_thread(struct thread *td) { TODO; } void pmap_swapin_thread(struct thread *td) { TODO; } void pmap_swapout_thread(struct thread *td) { TODO; } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size, u_int align) { vm_offset_t s, e; int i, j; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (align != 0) s = (phys_avail[i] + align - 1) & ~(align - 1); else s = phys_avail[i]; e = s + size; if (s < phys_avail[i] || e > phys_avail[i + 1]) continue; if (s == phys_avail[i]) { phys_avail[i] += size; } else if (e == phys_avail[i + 1]) { phys_avail[i + 1] -= size; } else { for (j = phys_avail_count * 2; j > i; j -= 2) { phys_avail[j] = phys_avail[j - 2]; phys_avail[j + 1] = phys_avail[j - 1]; } phys_avail[i + 3] = phys_avail[i + 1]; phys_avail[i + 1] = s; phys_avail[i + 2] = e; phys_avail_count++; } return (s); } panic("pmap_bootstrap_alloc: could not allocate memory"); } /* * Return an unmapped pvo for a kernel virtual address. * Used by pmap functions that operate on physical pages. */ static struct pvo_entry * pmap_rkva_alloc(void) { struct pvo_entry *pvo; struct pte *pt; vm_offset_t kva; int pteidx; if (pmap_rkva_count == 0) panic("pmap_rkva_alloc: no more reserved KVAs"); kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); pmap_kenter(kva, 0); pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); if (pvo == NULL) panic("pmap_kva_alloc: pmap_pvo_find_va failed"); pt = pmap_pvo_to_pte(pvo, pteidx); if (pt == NULL) panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; return (pvo); } static void pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, int *depth_p) { struct pte *pt; /* * If this pvo already has a valid pte, we need to save it so it can * be restored later. We then just reload the new PTE over the old * slot. 
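 * The saved_pt and depth_p arguments let a caller temporarily retarget
 * one of the few reserved PVOs handed out by pmap_rkva_alloc() and
 * later restore it: pmap_pa_map() stashes the previous PTE image and
 * pmap_pa_unmap() puts it back, with the depth counter catching
 * unbalanced map/unmap pairs.  Intended pairing (illustrative only,
 * locals invented):
 *
 *	struct pte saved;
 *	int depth = 0;
 *
 *	pmap_pa_map(pvo, pa, &saved, &depth);
 *	... access the frame through PVO_VADDR(pvo) ...
 *	pmap_pa_unmap(pvo, &saved, &depth);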
*/ if (saved_pt != NULL) { pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } *saved_pt = pvo->pvo_pte; pvo->pvo_pte.pte_lo &= ~PTE_RPGN; } pvo->pvo_pte.pte_lo |= pa; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_map: could not spill pvo %p", pvo); if (depth_p != NULL) (*depth_p)++; } static void pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) { struct pte *pt; pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } pvo->pvo_pte.pte_lo &= ~PTE_RPGN; /* * If there is a saved PTE and it's valid, restore it and return. */ if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { if (depth_p != NULL && --(*depth_p) == 0) panic("pmap_pa_unmap: restoring but depth == 0"); pvo->pvo_pte = *saved_pt; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_unmap: could not spill pvo %p", pvo); } } static void pmap_syncicache(vm_offset_t pa, vm_size_t len) { __syncicache((void *)pa, len); } static void tlbia(void) { caddr_t i; SYNC(); for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { TLBIE(i); EIEIO(); } TLBSYNC(); SYNC(); } static int pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) { struct pvo_entry *pvo; u_int sr; int first; u_int ptegidx; int i; pmap_pvo_enter_calls++; first = 0; /* * Compute the PTE Group index. */ va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); /* * Remove any existing mapping for this page. Reuse the pvo entry if * there is a mapping. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && (pvo->pvo_pte.pte_lo & PTE_PP) == (pte_lo & PTE_PP)) { return (0); } pmap_pvo_remove(pvo, -1); break; } } /* * If we aren't overwriting a mapping, try to allocate. */ if (pmap_initialized) { pvo = uma_zalloc(zone, M_NOWAIT); } else { if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", pmap_bpvo_pool_index, BPVO_POOL_SIZE, BPVO_POOL_SIZE * sizeof(struct pvo_entry)); } pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; pmap_bpvo_pool_index++; pvo->pvo_vaddr |= PVO_BOOTSTRAP; } if (pvo == NULL) { return (ENOMEM); } pmap_pvo_entries++; pvo->pvo_vaddr = va; pvo->pvo_pmap = pm; LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); pvo->pvo_vaddr &= ~ADDR_POFF; if (flags & VM_PROT_EXECUTE) pvo->pvo_vaddr |= PVO_EXECUTABLE; if (flags & PVO_WIRED) pvo->pvo_vaddr |= PVO_WIRED; if (pvo_head != &pmap_pvo_kunmanaged) pvo->pvo_vaddr |= PVO_MANAGED; pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); /* * Remember if the list was empty and therefore will be the first * item. */ if (LIST_FIRST(pvo_head) == NULL) first = 1; LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count++; pvo->pvo_pmap->pm_stats.resident_count++; /* * We hope this succeeds but it isn't required. */ i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (i >= 0) { PVO_PTEGIDX_SET(pvo, i); } else { panic("pmap_pvo_enter: overflow"); pmap_pte_overflow++; } return (first ? ENOENT : 0); } static void pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If there is an active pte entry, we need to deactivate it (and * save the ref & cfg bits). 
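 * Teardown order matters here: the hardware PTE is invalidated first so
 * no further REF/CHG updates can land, the final REF/CHG bits are
 * folded into the vm_page attribute cache for managed pages, and only
 * then is the PVO unlinked from the PTEG overflow list and the per-page
 * PV list and, unless it came from the bootstrap pool, freed back to
 * its zone.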
*/ pt = pmap_pvo_to_pte(pvo, pteidx); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); } else { pmap_pte_overflow--; } /* * Update our statistics. */ pvo->pvo_pmap->pm_stats.resident_count--; if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count--; /* * Save the REF/CHG bits into their cache if the page is managed. */ if (pvo->pvo_vaddr & PVO_MANAGED) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); if (pg != NULL) { pmap_attr_save(pg, pvo->pvo_pte.pte_lo & (PTE_REF | PTE_CHG)); } } /* * Remove this PVO from the PV list. */ LIST_REMOVE(pvo, pvo_vlink); /* * Remove this from the overflow list and return it to the pool * if we aren't going to reuse it. */ LIST_REMOVE(pvo, pvo_olink); if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone, pvo); pmap_pvo_entries--; pmap_pvo_remove_calls++; } static __inline int pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) { int pteidx; /* * We can find the actual pte entry without searching by grabbing * the PTEG index from 3 unused bits in pte_lo[11:9] and by * noticing the HID bit. */ pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); if (pvo->pvo_pte.pte_hi & PTE_HID) pteidx ^= pmap_pteg_mask * 8; return (pteidx); } static struct pvo_entry * pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) { struct pvo_entry *pvo; int ptegidx; u_int sr; va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if (pteidx_p) *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); return (pvo); } } return (NULL); } static struct pte * pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If we haven't been supplied the ptegidx, calculate it. */ if (pteidx == -1) { int ptegidx; u_int sr; sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); pteidx = pmap_pvo_pte_index(pvo, ptegidx); } pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " "valid pte index", pvo); } if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " "pvo but no valid pte", pvo); } if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { panic("pmap_pvo_to_pte: pvo %p has valid pte in " "pmap_pteg_table %p but invalid in pvo", pvo, pt); } if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { panic("pmap_pvo_to_pte: pvo %p pte does not match " "pte %p in pmap_pteg_table", pvo, pt); } return (pt); } if (pvo->pvo_pte.pte_hi & PTE_VALID) { panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " "pmap_pteg_table but valid in pvo", pvo, pt); } return (NULL); } static void * pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { vm_page_t m; if (bytes != PAGE_SIZE) panic("pmap_pvo_allocf: benno was shortsighted. hit him."); *flags = UMA_SLAB_PRIV; m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM); if (m == NULL) return (NULL); pmap_pvo_count++; return ((void *)VM_PAGE_TO_PHYS(m)); } /* * XXX: THIS STUFF SHOULD BE IN pte.c? 
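 * pmap_pte_spill() is the overflow handler, called when a translation
 * faults because its PTE is not currently resident in the hashed page
 * table.  It looks up the PVO for the faulting address, picks a victim
 * slot in the primary group from the low bits of the timebase, pulls
 * the victim's PTE (and its REF/CHG state) back into the victim's PVO,
 * and installs the faulting PVO's PTE in its place.  The victim pick,
 * in brief (same names as the code below):
 *
 *	__asm __volatile("mftb %0" : "=r"(i));
 *	pt = &pmap_pteg_table[ptegidx].pt[i & 7];
 *
 * A return of 0 means no PVO matched, so the caller has to treat the
 * access as a genuine fault.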
*/ int pmap_pte_spill(vm_offset_t addr) { struct pvo_entry *source_pvo, *victim_pvo; struct pvo_entry *pvo; int ptegidx, i, j; u_int sr; struct pteg *pteg; struct pte *pt; pmap_pte_spills++; sr = mfsrin(addr); ptegidx = va_to_pteg(sr, addr); /* * Have to substitute some entry. Use the primary hash for this. * Use low bits of timebase as random generator. */ pteg = &pmap_pteg_table[ptegidx]; __asm __volatile("mftb %0" : "=r"(i)); i &= 7; pt = &pteg->pt[i]; source_pvo = NULL; victim_pvo = NULL; LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { /* * We need to find a pvo entry for this address. */ PMAP_PVO_CHECK(pvo); if (source_pvo == NULL && pmap_pte_match(&pvo->pvo_pte, sr, addr, pvo->pvo_pte.pte_hi & PTE_HID)) { /* * Now found an entry to be spilled into the pteg. * The PTE is now valid, so we know it's active. */ j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (j >= 0) { PVO_PTEGIDX_SET(pvo, j); pmap_pte_overflow--; PMAP_PVO_CHECK(pvo); return (1); } source_pvo = pvo; if (victim_pvo != NULL) break; } /* * We also need the pvo entry of the victim we are replacing * so save the R & C bits of the PTE. */ if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; if (source_pvo != NULL) break; } } if (source_pvo == NULL) return (0); if (victim_pvo == NULL) { if ((pt->pte_hi & PTE_HID) == 0) panic("pmap_pte_spill: victim p-pte (%p) has no pvo" "entry", pt); /* * If this is a secondary PTE, we need to search it's primary * pvo bucket for the matching PVO. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], pvo_olink) { PMAP_PVO_CHECK(pvo); /* * We also need the pvo entry of the victim we are * replacing so save the R & C bits of the PTE. */ if (pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; break; } } if (victim_pvo == NULL) panic("pmap_pte_spill: victim s-pte (%p) has no pvo" "entry", pt); } /* * We are invalidating the TLB entry for the EA we are replacing even * though it's valid. If we don't, we lose any ref/chg bit changes * contained in the TLB entry. */ source_pvo->pvo_pte.pte_hi &= ~PTE_HID; pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); pmap_pte_set(pt, &source_pvo->pvo_pte); PVO_PTEGIDX_CLR(victim_pvo); PVO_PTEGIDX_SET(source_pvo, i); pmap_pte_replacements++; PMAP_PVO_CHECK(victim_pvo); PMAP_PVO_CHECK(source_pvo); return (1); } static int pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) { struct pte *pt; int i; /* * First try primary hash. */ for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi &= ~PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } /* * Now try secondary hash. */ ptegidx ^= pmap_pteg_mask; ptegidx++; for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi |= PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } panic("pmap_pte_insert: overflow"); return (-1); } static boolean_t pmap_query_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; if (pmap_attr_fetch(m) & ptebit) return (TRUE); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if we saved the bit off. If so, cache it and return * success. */ if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } /* * No luck, now go through the hard part of looking at the PTEs * themselves. Sync so that any pending REF/CHG bits are flushed to * the PTEs. 
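 * The second pass below goes to the live page table: for every PVO that
 * still owns a hardware PTE, pmap_pte_synch() ORs the hardware REF/CHG
 * bits into the cached copy, and the first PVO found with the requested
 * bit latches it into the page's attribute cache so later queries can
 * stop at the quick check above.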
*/ SYNC(); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if this pvo has a valid PTE. if so, fetch the * REF/CHG bits from the valid PTE. If the appropriate * ptebit is set, cache it and return success. */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } } return (TRUE); } static boolean_t pmap_clear_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; int rv; /* * Clear the cached value. */ rv = pmap_attr_fetch(m); pmap_attr_clear(m, ptebit); /* * Sync so that any pending REF/CHG bits are flushed to the PTEs (so * we can reset the right ones). note that since the pvo entries and * list heads are accessed via BAT0 and are never placed in the page * table, we don't have to worry about further accesses setting the * REF/CHG bits. */ SYNC(); /* * For each pvo entry, clear the pvo's ptebit. If this pvo has a * valid pte clear the ptebit from the valid pte. */ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); } rv |= pvo->pvo_pte.pte_lo; pvo->pvo_pte.pte_lo &= ~ptebit; PMAP_PVO_CHECK(pvo); /* sanity check */ } return ((rv & ptebit) != 0); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, tmpva, offset; pa = trunc_page(pa); offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); GIANT_REQUIRED; va = kmem_alloc_pageable(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { pmap_kenter(tmpva, pa); TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } return ((void *)(va + offset)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { vm_offset_t base, offset; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); kmem_free(kernel_map, base, size); } Index: head/sys/powerpc/powerpc/pmap.c =================================================================== --- head/sys/powerpc/powerpc/pmap.c (revision 101345) +++ head/sys/powerpc/powerpc/pmap.c (revision 101346) @@ -1,2276 +1,2271 @@ /* * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Matt Thomas of Allegro Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. 
Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ */ /* * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #define TODO panic("%s: not implemented", __func__); #define PMAP_LOCK(pm) #define PMAP_UNLOCK(pm) #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) #define TLBSYNC() __asm __volatile("tlbsync"); #define SYNC() __asm __volatile("sync"); #define EIEIO() __asm __volatile("eieio"); #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) #define VSID_TO_SR(vsid) ((vsid) & 0xf) #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ #define PVO_WIRED 0x0010 /* PVO entry is wired */ #define PVO_MANAGED 0x0020 /* PVO entry is managed */ #define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ #define PVO_BOOTSTRAP 0x0080 /* PVO entry allocated during bootstrap */ #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) #define PVO_PTEGIDX_CLR(pvo) \ ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) #define PVO_PTEGIDX_SET(pvo, i) \ ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) #define PMAP_PVO_CHECK(pvo) struct ofw_map { vm_offset_t om_va; vm_size_t om_len; vm_offset_t om_pa; u_int om_mode; }; int pmap_bootstrapped = 0; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; /* * Physical addresses of first and last available physical page. */ vm_offset_t avail_start; vm_offset_t avail_end; /* * Map of physical memory regions. 
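 *
 * phys_avail[] below is filled in as { start, end } pairs (end being
 * start + size) and is terminated by a zeroed pair.  As a rough
 * illustration of that layout, a hypothetical helper (not part of
 * this file) that totals the free memory would walk it like this:
 *
 *	static vm_size_t
 *	phys_avail_total(void)
 *	{
 *		vm_size_t total;
 *		int i;
 *
 *		total = 0;
 *		for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *			total += phys_avail[i + 1] - phys_avail[i];
 *		return (total);
 *	}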
*/ vm_offset_t phys_avail[128]; u_int phys_avail_count; static struct mem_region *regions; static struct mem_region *pregions; int regions_sz, pregions_sz; static struct ofw_map *translations; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * Kernel pmap. */ struct pmap kernel_pmap_store; extern struct pmap ofw_pmap; /* * PTEG data. */ static struct pteg *pmap_pteg_table; u_int pmap_pteg_count; u_int pmap_pteg_mask; /* * PVO data. */ struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ uma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ uma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ struct vm_object pmap_upvo_zone_obj; struct vm_object pmap_mpvo_zone_obj; static vm_object_t pmap_pvo_obj; static u_int pmap_pvo_count; #define BPVO_POOL_SIZE 32768 static struct pvo_entry *pmap_bpvo_pool; static int pmap_bpvo_pool_index = 0; #define VSID_NBPW (sizeof(u_int32_t) * 8) static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; static boolean_t pmap_initialized = FALSE; /* * Statistics. */ u_int pmap_pte_valid = 0; u_int pmap_pte_overflow = 0; u_int pmap_pte_replacements = 0; u_int pmap_pvo_entries = 0; u_int pmap_pvo_enter_calls = 0; u_int pmap_pvo_remove_calls = 0; u_int pmap_pte_spills = 0; SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, &pmap_pte_overflow, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, &pmap_pte_replacements, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, &pmap_pvo_enter_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, &pmap_pvo_remove_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, &pmap_pte_spills, 0, ""); struct pvo_entry *pmap_pvo_zeropage; vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; u_int pmap_rkva_count = 4; /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); /* * PTE calls. */ static int pmap_pte_insert(u_int, struct pte *); /* * PVO calls. */ static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, vm_offset_t, vm_offset_t, u_int, int); static void pmap_pvo_remove(struct pvo_entry *, int); static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); /* * Utility routines. 
*/ static void * pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int); static struct pvo_entry *pmap_rkva_alloc(void); static void pmap_pa_map(struct pvo_entry *, vm_offset_t, struct pte *, int *); static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); static void pmap_syncicache(vm_offset_t, vm_size_t); static boolean_t pmap_query_bit(vm_page_t, int); static boolean_t pmap_clear_bit(vm_page_t, int); static void tlbia(void); static __inline int va_to_sr(u_int *sr, vm_offset_t va) { return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); } static __inline u_int va_to_pteg(u_int sr, vm_offset_t addr) { u_int hash; hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); return (hash & pmap_pteg_mask); } static __inline struct pvo_head * pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pa); if (pg_p != NULL) *pg_p = pg; if (pg == NULL) return (&pmap_pvo_unmanaged); return (&pg->md.mdpg_pvoh); } static __inline struct pvo_head * vm_page_to_pvoh(vm_page_t m) { return (&m->md.mdpg_pvoh); } static __inline void pmap_attr_clear(vm_page_t m, int ptebit) { m->md.mdpg_attrs &= ~ptebit; } static __inline int pmap_attr_fetch(vm_page_t m) { return (m->md.mdpg_attrs); } static __inline void pmap_attr_save(vm_page_t m, int ptebit) { m->md.mdpg_attrs |= ptebit; } static __inline int pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) { if (pt->pte_hi == pvo_pt->pte_hi) return (1); return (0); } static __inline int pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) { return (pt->pte_hi & ~PTE_VALID) == (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | ((va >> ADDR_API_SHFT) & PTE_API) | which); } static __inline void pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) { /* * Construct a PTE. Default to IMB initially. Valid bit only gets * set when the real pte is set in memory. * * Note: Don't set the valid bit for correct operation of tlb update. */ pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); pt->pte_lo = pte_lo; } static __inline void pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); } static __inline void pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) { /* * As shown in Section 7.6.3.2.3 */ pt->pte_lo &= ~ptebit; TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); } static __inline void pmap_pte_set(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_hi |= PTE_VALID; /* * Update the PTE as defined in section 7.6.3.1. * Note that the REF/CHG bits are from pvo_pt and thus should havce * been saved so this routine can restore them (if desired). */ pt->pte_lo = pvo_pt->pte_lo; EIEIO(); pt->pte_hi = pvo_pt->pte_hi; SYNC(); pmap_pte_valid++; } static __inline void pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { pvo_pt->pte_hi &= ~PTE_VALID; /* * Force the reg & chg bits back into the PTEs. */ SYNC(); /* * Invalidate the pte. */ pt->pte_hi &= ~PTE_VALID; SYNC(); TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); /* * Save the reg & chg bits. */ pmap_pte_synch(pt, pvo_pt); pmap_pte_valid--; } static __inline void pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { /* * Invalidate the PTE */ pmap_pte_unset(pt, pvo_pt, va); pmap_pte_set(pt, pvo_pt); } /* * Quick sort callout for comparing memory regions. 
*/ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *regiona; const struct mem_region *regionb; regiona = a; regionb = b; if (regiona->mr_start < regionb->mr_start) return (-1); else if (regiona->mr_start > regionb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *mapa; const struct ofw_map *mapb; mapa = a; mapb = b; if (mapa->om_pa < mapb->om_pa) return (-1); else if (mapa->om_pa > mapb->om_pa) return (1); else return (0); } void pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { ihandle_t mmui; phandle_t chosen, mmu; int sz; int i, j; vm_size_t size, physsz; vm_offset_t pa, va, off; u_int batl, batu; /* * Set up BAT0 to only map the lowest 256 MB area */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); /* * Map PCI memory space. */ battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); /* * Map obio devices. */ battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); /* * Use an IBAT and a DBAT to map the bottom segment of memory * where we are. */ batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x00000000, BAT_M, BAT_PP_RW); __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" :: "r"(batu), "r"(batl)); #if 0 /* map frame buffer */ batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif #if 1 /* map pci space */ batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r"(batu), "r"(batl)); #endif /* * Set the start and end of kva. */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); for (i = 0; i < pregions_sz; i++) { CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", pregions[i].mr_start, pregions[i].mr_start + pregions[i].mr_size, pregions[i].mr_size); } if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) panic("pmap_bootstrap: phys_avail too small"); qsort(regions, regions_sz, sizeof(*regions), mr_cmp); phys_avail_count = 0; physsz = 0; for (i = 0, j = 0; i < regions_sz; i++, j += 2) { CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, regions[i].mr_start + regions[i].mr_size, regions[i].mr_size); phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; phys_avail_count++; physsz += regions[i].mr_size; } physmem = btoc(physsz); /* * Allocate PTEG table. 
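 *
 * In the !PTEGCOUNT case below, the group count starts at 0x1000 and
 * is doubled while it is still below physmem (in pages), then halved
 * once.  A worked example with assumed numbers: for physmem = 0xc000
 * pages (192MB of 4K pages) the loop runs 0x1000 -> 0x2000 -> 0x4000
 * -> 0x8000 -> 0x10000, the shift back gives 0x8000 PTEGs, and with
 * the usual eight 8-byte PTEs per group the table allocated below is
 * 0x8000 * 64 bytes = 2MB.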
*/ #ifdef PTEGCOUNT pmap_pteg_count = PTEGCOUNT; #else pmap_pteg_count = 0x1000; while (pmap_pteg_count < physmem) pmap_pteg_count <<= 1; pmap_pteg_count >>= 1; #endif /* PTEGCOUNT */ size = pmap_pteg_count * sizeof(struct pteg); CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, size); pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); pmap_pteg_mask = pmap_pteg_count - 1; /* * Allocate pv/overflow lists. */ size = sizeof(struct pvo_head) * pmap_pteg_count; pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, PAGE_SIZE); CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); for (i = 0; i < pmap_pteg_count; i++) LIST_INIT(&pmap_pvo_table[i]); /* * Allocate the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); /* * Initialise the unmanaged pvo pool. */ pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); pmap_bpvo_pool_index = 0; /* * Make sure kernel vsid is allocated as well as VSID 0. */ pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); pmap_vsid_bitmap[0] |= 1; /* * Set up the OpenFirmware pmap and add it's mappings. */ pmap_pinit(&ofw_pmap); ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; if ((chosen = OF_finddevice("/chosen")) == -1) panic("pmap_bootstrap: can't find /chosen"); OF_getprop(chosen, "mmu", &mmui, 4); if ((mmu = OF_instance_to_package(mmui)) == -1) panic("pmap_bootstrap: can't get mmu package"); if ((sz = OF_getproplen(mmu, "translations")) == -1) panic("pmap_bootstrap: can't get ofw translation count"); translations = NULL; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (phys_avail[i + 1] >= sz) translations = (struct ofw_map *)phys_avail[i]; } if (translations == NULL) panic("pmap_bootstrap: no space to copy translations"); bzero(translations, sz); if (OF_getprop(mmu, "translations", translations, sz) == -1) panic("pmap_bootstrap: can't get ofw translations"); CTR0(KTR_PMAP, "pmap_bootstrap: translations"); sz /= sizeof(*translations); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", translations[i].om_pa, translations[i].om_va, translations[i].om_len); /* Drop stuff below something? */ /* Enter the pages? */ for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { struct vm_page m; m.phys_addr = translations[i].om_pa + off; pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, VM_PROT_ALL, 1); } } #ifdef SMP TLBSYNC(); #endif /* * Initialize the kernel pmap (which is statically allocated). */ for (i = 0; i < 16; i++) { kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; } kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; kernel_pmap->pm_active = ~0; /* * Allocate a kernel stack with a guard page for thread0 and map it * into the kernel page map. */ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); kstack0_phys = pa; kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, kstack0); virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; pmap_kenter(va, pa); TLBIE(va); } /* * Calculate the first and last available physical addresses. 
*/ avail_start = phys_avail[0]; for (i = 0; phys_avail[i + 2] != 0; i += 2) ; avail_end = phys_avail[i + 1]; Maxmem = powerpc_btop(avail_end); /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize hardware. */ for (i = 0; i < 16; i++) { mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); } __asm __volatile ("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); __asm __volatile ("sync; mtsdr1 %0; isync" :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); tlbia(); pmap_bootstrapped++; } /* * Activate a user pmap. The pmap must be activated before it's address * space can be accessed in any way. */ void pmap_activate(struct thread *td) { pmap_t pm, pmr; /* * Load all the data we need up front to encourasge the compiler to * not issue any loads while we have interrupts disabled below. */ pm = &td->td_proc->p_vmspace->vm_pmap; KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?")); if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) pmr = pm; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(curpmap, pmr); } void pmap_deactivate(struct thread *td) { pmap_t pm; pm = &td->td_proc->p_vmspace->vm_pmap; pm->pm_active &= ~(PCPU_GET(cpumask)); PCPU_SET(curpmap, NULL); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); } void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { if (wired) { if ((pvo->pvo_vaddr & PVO_WIRED) == 0) pm->pm_stats.wired_count++; pvo->pvo_vaddr |= PVO_WIRED; } else { if ((pvo->pvo_vaddr & PVO_WIRED) != 0) pm->pm_stats.wired_count--; pvo->pvo_vaddr &= ~PVO_WIRED; } } } void pmap_clear_modify(vm_page_t m) { if (m->flags * PG_FICTITIOUS) return; pmap_clear_bit(m, PTE_CHG); } void pmap_collect(void) { TODO; } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { /* * This is not needed as it's mainly an optimisation. * It may want to be implemented later though. */ } void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); kcopy((void *)src, (void *)dst, PAGE_SIZE); } /* * Zero a page of physical memory by temporarily mapping it into the tlb. 
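 *
 * Three cases are handled below: physical addresses below
 * SEGMENT_LENGTH are reachable directly through the 1:1 BAT mapping
 * set up in pmap_bootstrap() and can be zeroed in place; once
 * pmap_init() has run, higher pages are zeroed through a reserved
 * kernel virtual address (pmap_pvo_zeropage) that is temporarily
 * pointed at the page with pmap_pa_map(); before that there is no way
 * to reach such a page, hence the panic.  The dcbz loop clears one
 * data cache block per iteration.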
*/ void pmap_zero_page(vm_page_t m) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, PAGE_SIZE); for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, size); for (i = size / CACHELINESIZE; i > 0; i--) { __asm __volatile("dcbz 0,%0" :: "r"(va)); va += CACHELINESIZE; } if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_idle(vm_page_t m) { /* XXX this is called outside of Giant, is pmap_zero_page safe? */ /* XXX maybe have a dedicated mapping for this to avoid the problem? */ mtx_lock(&Giant); pmap_zero_page(m); mtx_unlock(&Giant); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct pvo_head *pvo_head; uma_zone_t zone; vm_page_t pg; u_int pte_lo, pvo_flags, was_exec, i; int error; if (!pmap_initialized) { pvo_head = &pmap_pvo_kunmanaged; zone = pmap_upvo_zone; pvo_flags = 0; pg = NULL; was_exec = PTE_EXEC; } else { pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg); zone = pmap_mpvo_zone; pvo_flags = PVO_MANAGED; was_exec = 0; } /* * If this is a managed page, and it's the first reference to the page, * clear the execness of the page. Otherwise fetch the execness. */ if (pg != NULL) { if (LIST_EMPTY(pvo_head)) { pmap_attr_clear(pg, PTE_EXEC); } else { was_exec = pmap_attr_fetch(pg) & PTE_EXEC; } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. */ pte_lo = PTE_I | PTE_G; for (i = 0; i < pregions_sz; i++) { if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && (VM_PAGE_TO_PHYS(m) < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(PTE_I | PTE_G); break; } } if (prot & VM_PROT_WRITE) pte_lo |= PTE_BW; else pte_lo |= PTE_BR; pvo_flags |= (prot & VM_PROT_EXECUTE); if (wired) pvo_flags |= PVO_WIRED; error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); /* * Flush the real page from the instruction cache if this page is * mapped executable and cacheable and was not previously mapped (or * was not mapped executable). */ if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0 && was_exec == 0) { /* * Flush the real memory from the cache. 
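 *
 * pmap_syncicache() (later in this file) just wraps __syncicache().
 * As a rough sketch (not necessarily how __syncicache() is
 * implemented), making newly written instructions visible on PowerPC
 * is conventionally a per-cache-line store-back followed by an
 * instruction cache invalidate; the helper name here is purely
 * illustrative:
 *
 *	static void
 *	sync_icache_range(char *p, size_t len)
 *	{
 *		size_t off;
 *
 *		for (off = 0; off < len; off += CACHELINESIZE)
 *			__asm __volatile("dcbst 0,%0" :: "r"(p + off));
 *		__asm __volatile("sync");
 *		for (off = 0; off < len; off += CACHELINESIZE)
 *			__asm __volatile("icbi 0,%0" :: "r"(p + off));
 *		__asm __volatile("sync; isync");
 *	}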
*/ pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); if (pg != NULL) pmap_attr_save(pg, PTE_EXEC); } } vm_offset_t pmap_extract(pmap_t pm, vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } return (0); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { CTR0(KTR_PMAP, "pmap_init"); pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16); pmap_pvo_count = 0; pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf); pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf); pmap_initialized = TRUE; } void pmap_init2(void) { CTR0(KTR_PMAP, "pmap_init2"); } boolean_t pmap_is_modified(vm_page_t m) { if (m->flags & PG_FICTITIOUS) return (FALSE); return (pmap_query_bit(m, PTE_CHG)); } void pmap_clear_reference(vm_page_t m) { TODO; } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { TODO; return (0); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { u_int pte_lo; int error; int i; #if 0 if (va < VM_MIN_KERNEL_ADDRESS) panic("pmap_kenter: attempt to enter non-kernel address %#x", va); #endif pte_lo = PTE_I | PTE_G | PTE_BW; for (i = 0; phys_avail[i + 2] != 0; i += 2) { if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) { pte_lo &= ~(PTE_I | PTE_G); break; } } error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); if (error != 0 && error != ENOENT) panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, pa, error); /* * Flush the real memory from the instruction cache. */ if ((pte_lo & (PTE_I | PTE_G)) == 0) { pmap_syncicache(pa, PAGE_SIZE); } } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct pvo_entry *pvo; pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); if (pvo == NULL) { return (0); } return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE)); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. 
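 *
 * A hypothetical caller mapping a physical range at startup would use
 * it roughly like this (the local names are illustrative only):
 *
 *	vm_offset_t va;
 *
 *	va = pmap_map(&virtual_avail, pa_start, pa_end, VM_PROT_ALL);
 *
 * On return the range is mapped at [va, va + (pa_end - pa_start)) and
 * virtual_avail has been advanced past it.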
*/ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { vm_offset_t sva, va; sva = *virt; va = sva; for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) pmap_kenter(va, pa_start); *virt = va; return (sva); } int pmap_mincore(pmap_t pmap, vm_offset_t addr) { TODO; return (0); } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); /* XXX */ } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { struct pvo_head *pvo_head; struct pvo_entry *pvo, *next_pvo; struct pte *pt; /* * Since the routine only downgrades protection, if the * maximal protection is desired, there isn't any change * to be made. */ if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE)) return; pvo_head = vm_page_to_pvoh(m); for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { next_pvo = LIST_NEXT(pvo, pvo_vlink); PMAP_PVO_CHECK(pvo); /* sanity check */ /* * Downgrading to no mapping at all, we just remove the entry. */ if ((prot & VM_PROT_READ) == 0) { pmap_pvo_remove(pvo, -1); continue; } /* * If EXEC permission is being revoked, just clear the flag * in the PVO. */ if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * If this entry is already RO, don't diddle with the page * table. */ if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { PMAP_PVO_CHECK(pvo); continue; } /* * Grab the PTE before we diddle the bits so pvo_to_pte can * verify the pte contents are as expected. */ pt = pmap_pvo_to_pte(pvo, -1); pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PMAP_PVO_CHECK(pvo); /* sanity check */ } } /* * Make the specified page pageable (or not). Unneeded. */ void pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { TODO; return (0); } static u_int pmap_vsidcontext; void pmap_pinit(pmap_t pmap) { int i, mask; u_int entropy; entropy = 0; __asm __volatile("mftb %0" : "=r"(entropy)); /* * Allocate some segment registers for this pmap. */ for (i = 0; i < NPMAPS; i += VSID_NBPW) { u_int hash, n; /* * Create a new value by mutiplying by a prime and adding in * entropy from the timebase register. This is to make the * VSID more random so that the PT hash function collides * less often. (Note that the prime casues gcc to do shifts * instead of a multiply.) */ pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; hash = pmap_vsidcontext & (NPMAPS - 1); if (hash == 0) /* 0 is special, avoid it */ continue; n = hash >> 5; mask = 1 << (hash & (VSID_NBPW - 1)); hash = (pmap_vsidcontext & 0xfffff); if (pmap_vsid_bitmap[n] & mask) { /* collision? */ /* anything free in this bucket? 
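 * In other words, if all 32 bits of the bucket are already taken,
 * fold in more entropy and try another candidate hash; otherwise the
 * intent is to claim the first clear bit of bucket n and splice its
 * index into the low bits of the hash.  As a worked example with an
 * assumed value, hash = 0x2a selects bucket n = 0x2a >> 5 = 1 and bit
 * mask = 1 << (0x2a & 31) = 1 << 10.  The 16 segment registers are
 * then loaded with VSID_MAKE(i, hash), which packs the segment number
 * into the low 4 bits and the 20-bit hash above it.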
*/ if (pmap_vsid_bitmap[n] == 0xffffffff) { entropy = (pmap_vsidcontext >> 20); continue; } i = ffs(~pmap_vsid_bitmap[i]) - 1; mask = 1 << i; hash &= 0xfffff & ~(VSID_NBPW - 1); hash |= i; } pmap_vsid_bitmap[n] |= mask; for (i = 0; i < 16; i++) pmap->pm_sr[i] = VSID_MAKE(i, hash); return; } panic("pmap_pinit: out of segments"); } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { pmap_pinit(pm); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } void pmap_pinit2(pmap_t pmap) { /* XXX: Remove this stub when no longer called */ } void pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_prefault: non current pmap")); /* XXX */ } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct pvo_entry *pvo; struct pte *pt; int pteidx; CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo == NULL) continue; if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * Grab the PTE pointer before we diddle with the cached PTE * copy. */ pt = pmap_pvo_to_pte(pvo, pteidx); /* * Change the protection of the page. */ pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; /* * If the PVO is in the page table, update that pte as well. */ if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); } } vm_offset_t pmap_phys_address(int ppn) { TODO; return (0); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t va, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kremove(va); } void pmap_release(pmap_t pmap) { TODO; } /* * Remove the given range of addresses from the specified map. */ void pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry *pvo; int pteidx; for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo != NULL) { pmap_pvo_remove(pvo, pteidx); } } } /* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove in the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_remove_pages: non current pmap")); pmap_remove(pm, sva, eva); } /* * Create the kernel stack and pcb for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. */ void pmap_new_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; u_int i; /* * Allocate object for the kstack. 
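 *
 * The stack itself is KSTACK_PAGES pages backed by this object; the
 * kmem_alloc_nofault() below reserves KSTACK_GUARD_PAGES of extra KVA
 * beneath it that is deliberately left unmapped, so a runaway kernel
 * stack faults instead of silently overwriting whatever sits below;
 * hence the ks += KSTACK_GUARD_PAGES * PAGE_SIZE adjustment.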
*/ ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; /* * Get a kernel virtual address for the kstack for this thread. */ ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); if (ks == 0) panic("pmap_new_thread: kstack allocation failed"); TLBIE(ks); ks += KSTACK_GUARD_PAGES * PAGE_SIZE; td->td_kstack = ks; for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page. */ - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); + m = vm_page_grab(ksobj, i, + VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); /* - * Wire the page. - */ - m->wire_count++; - - /* * Enter the page into the kernel address space. */ pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } void pmap_dispose_thread(struct thread *td) { TODO; } void pmap_swapin_thread(struct thread *td) { TODO; } void pmap_swapout_thread(struct thread *td) { TODO; } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size, u_int align) { vm_offset_t s, e; int i, j; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (align != 0) s = (phys_avail[i] + align - 1) & ~(align - 1); else s = phys_avail[i]; e = s + size; if (s < phys_avail[i] || e > phys_avail[i + 1]) continue; if (s == phys_avail[i]) { phys_avail[i] += size; } else if (e == phys_avail[i + 1]) { phys_avail[i + 1] -= size; } else { for (j = phys_avail_count * 2; j > i; j -= 2) { phys_avail[j] = phys_avail[j - 2]; phys_avail[j + 1] = phys_avail[j - 1]; } phys_avail[i + 3] = phys_avail[i + 1]; phys_avail[i + 1] = s; phys_avail[i + 2] = e; phys_avail_count++; } return (s); } panic("pmap_bootstrap_alloc: could not allocate memory"); } /* * Return an unmapped pvo for a kernel virtual address. * Used by pmap functions that operate on physical pages. */ static struct pvo_entry * pmap_rkva_alloc(void) { struct pvo_entry *pvo; struct pte *pt; vm_offset_t kva; int pteidx; if (pmap_rkva_count == 0) panic("pmap_rkva_alloc: no more reserved KVAs"); kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); pmap_kenter(kva, 0); pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); if (pvo == NULL) panic("pmap_kva_alloc: pmap_pvo_find_va failed"); pt = pmap_pvo_to_pte(pvo, pteidx); if (pt == NULL) panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; return (pvo); } static void pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, int *depth_p) { struct pte *pt; /* * If this pvo already has a valid pte, we need to save it so it can * be restored later. We then just reload the new PTE over the old * slot. 
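 *
 * Callers that do not care about the previous contents pass NULL for
 * both saved_pt and depth_p, as pmap_zero_page() does:
 *
 *	pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
 *	bzero((void *)PVO_VADDR(pmap_pvo_zeropage), PAGE_SIZE);
 *	pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
 *
 * Callers that nest borrowed mappings hand in a struct pte and a
 * depth counter instead, so that pmap_pa_unmap() can put the earlier
 * mapping back when it unwinds.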
*/ if (saved_pt != NULL) { pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } *saved_pt = pvo->pvo_pte; pvo->pvo_pte.pte_lo &= ~PTE_RPGN; } pvo->pvo_pte.pte_lo |= pa; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_map: could not spill pvo %p", pvo); if (depth_p != NULL) (*depth_p)++; } static void pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) { struct pte *pt; pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } pvo->pvo_pte.pte_lo &= ~PTE_RPGN; /* * If there is a saved PTE and it's valid, restore it and return. */ if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { if (depth_p != NULL && --(*depth_p) == 0) panic("pmap_pa_unmap: restoring but depth == 0"); pvo->pvo_pte = *saved_pt; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_unmap: could not spill pvo %p", pvo); } } static void pmap_syncicache(vm_offset_t pa, vm_size_t len) { __syncicache((void *)pa, len); } static void tlbia(void) { caddr_t i; SYNC(); for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { TLBIE(i); EIEIO(); } TLBSYNC(); SYNC(); } static int pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) { struct pvo_entry *pvo; u_int sr; int first; u_int ptegidx; int i; pmap_pvo_enter_calls++; first = 0; /* * Compute the PTE Group index. */ va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); /* * Remove any existing mapping for this page. Reuse the pvo entry if * there is a mapping. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && (pvo->pvo_pte.pte_lo & PTE_PP) == (pte_lo & PTE_PP)) { return (0); } pmap_pvo_remove(pvo, -1); break; } } /* * If we aren't overwriting a mapping, try to allocate. */ if (pmap_initialized) { pvo = uma_zalloc(zone, M_NOWAIT); } else { if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", pmap_bpvo_pool_index, BPVO_POOL_SIZE, BPVO_POOL_SIZE * sizeof(struct pvo_entry)); } pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; pmap_bpvo_pool_index++; pvo->pvo_vaddr |= PVO_BOOTSTRAP; } if (pvo == NULL) { return (ENOMEM); } pmap_pvo_entries++; pvo->pvo_vaddr = va; pvo->pvo_pmap = pm; LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); pvo->pvo_vaddr &= ~ADDR_POFF; if (flags & VM_PROT_EXECUTE) pvo->pvo_vaddr |= PVO_EXECUTABLE; if (flags & PVO_WIRED) pvo->pvo_vaddr |= PVO_WIRED; if (pvo_head != &pmap_pvo_kunmanaged) pvo->pvo_vaddr |= PVO_MANAGED; pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); /* * Remember if the list was empty and therefore will be the first * item. */ if (LIST_FIRST(pvo_head) == NULL) first = 1; LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count++; pvo->pvo_pmap->pm_stats.resident_count++; /* * We hope this succeeds but it isn't required. */ i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (i >= 0) { PVO_PTEGIDX_SET(pvo, i); } else { panic("pmap_pvo_enter: overflow"); pmap_pte_overflow++; } return (first ? ENOENT : 0); } static void pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If there is an active pte entry, we need to deactivate it (and * save the ref & cfg bits). 
*/ pt = pmap_pvo_to_pte(pvo, pteidx); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); } else { pmap_pte_overflow--; } /* * Update our statistics. */ pvo->pvo_pmap->pm_stats.resident_count--; if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count--; /* * Save the REF/CHG bits into their cache if the page is managed. */ if (pvo->pvo_vaddr & PVO_MANAGED) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); if (pg != NULL) { pmap_attr_save(pg, pvo->pvo_pte.pte_lo & (PTE_REF | PTE_CHG)); } } /* * Remove this PVO from the PV list. */ LIST_REMOVE(pvo, pvo_vlink); /* * Remove this from the overflow list and return it to the pool * if we aren't going to reuse it. */ LIST_REMOVE(pvo, pvo_olink); if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone, pvo); pmap_pvo_entries--; pmap_pvo_remove_calls++; } static __inline int pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) { int pteidx; /* * We can find the actual pte entry without searching by grabbing * the PTEG index from 3 unused bits in pte_lo[11:9] and by * noticing the HID bit. */ pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); if (pvo->pvo_pte.pte_hi & PTE_HID) pteidx ^= pmap_pteg_mask * 8; return (pteidx); } static struct pvo_entry * pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) { struct pvo_entry *pvo; int ptegidx; u_int sr; va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if (pteidx_p) *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); return (pvo); } } return (NULL); } static struct pte * pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If we haven't been supplied the ptegidx, calculate it. */ if (pteidx == -1) { int ptegidx; u_int sr; sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); pteidx = pmap_pvo_pte_index(pvo, ptegidx); } pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " "valid pte index", pvo); } if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " "pvo but no valid pte", pvo); } if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { panic("pmap_pvo_to_pte: pvo %p has valid pte in " "pmap_pteg_table %p but invalid in pvo", pvo, pt); } if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { panic("pmap_pvo_to_pte: pvo %p pte does not match " "pte %p in pmap_pteg_table", pvo, pt); } return (pt); } if (pvo->pvo_pte.pte_hi & PTE_VALID) { panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " "pmap_pteg_table but valid in pvo", pvo, pt); } return (NULL); } static void * pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { vm_page_t m; if (bytes != PAGE_SIZE) panic("pmap_pvo_allocf: benno was shortsighted. hit him."); *flags = UMA_SLAB_PRIV; m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM); if (m == NULL) return (NULL); pmap_pvo_count++; return ((void *)VM_PAGE_TO_PHYS(m)); } /* * XXX: THIS STUFF SHOULD BE IN pte.c? 
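 *
 * A quick reminder of the hashing scheme the routines below rely on:
 * each mapping has a primary PTE group computed by va_to_pteg(), and
 * a secondary group obtained by xor'ing that index with
 * pmap_pteg_mask; entries placed in the secondary group carry
 * PTE_HID.  As an illustrative helper (not part of this file):
 *
 *	static u_int
 *	secondary_pteg(u_int sr, vm_offset_t va)
 *	{
 *
 *		return (va_to_pteg(sr, va) ^ pmap_pteg_mask);
 *	}
 *
 * pmap_pte_spill() searches the primary group's pvo list for the
 * faulting address, while pmap_pte_insert() tries the eight slots of
 * the primary group before falling back to the secondary one.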
*/ int pmap_pte_spill(vm_offset_t addr) { struct pvo_entry *source_pvo, *victim_pvo; struct pvo_entry *pvo; int ptegidx, i, j; u_int sr; struct pteg *pteg; struct pte *pt; pmap_pte_spills++; sr = mfsrin(addr); ptegidx = va_to_pteg(sr, addr); /* * Have to substitute some entry. Use the primary hash for this. * Use low bits of timebase as random generator. */ pteg = &pmap_pteg_table[ptegidx]; __asm __volatile("mftb %0" : "=r"(i)); i &= 7; pt = &pteg->pt[i]; source_pvo = NULL; victim_pvo = NULL; LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { /* * We need to find a pvo entry for this address. */ PMAP_PVO_CHECK(pvo); if (source_pvo == NULL && pmap_pte_match(&pvo->pvo_pte, sr, addr, pvo->pvo_pte.pte_hi & PTE_HID)) { /* * Now found an entry to be spilled into the pteg. * The PTE is now valid, so we know it's active. */ j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (j >= 0) { PVO_PTEGIDX_SET(pvo, j); pmap_pte_overflow--; PMAP_PVO_CHECK(pvo); return (1); } source_pvo = pvo; if (victim_pvo != NULL) break; } /* * We also need the pvo entry of the victim we are replacing * so save the R & C bits of the PTE. */ if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; if (source_pvo != NULL) break; } } if (source_pvo == NULL) return (0); if (victim_pvo == NULL) { if ((pt->pte_hi & PTE_HID) == 0) panic("pmap_pte_spill: victim p-pte (%p) has no pvo" "entry", pt); /* * If this is a secondary PTE, we need to search it's primary * pvo bucket for the matching PVO. */ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], pvo_olink) { PMAP_PVO_CHECK(pvo); /* * We also need the pvo entry of the victim we are * replacing so save the R & C bits of the PTE. */ if (pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; break; } } if (victim_pvo == NULL) panic("pmap_pte_spill: victim s-pte (%p) has no pvo" "entry", pt); } /* * We are invalidating the TLB entry for the EA we are replacing even * though it's valid. If we don't, we lose any ref/chg bit changes * contained in the TLB entry. */ source_pvo->pvo_pte.pte_hi &= ~PTE_HID; pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); pmap_pte_set(pt, &source_pvo->pvo_pte); PVO_PTEGIDX_CLR(victim_pvo); PVO_PTEGIDX_SET(source_pvo, i); pmap_pte_replacements++; PMAP_PVO_CHECK(victim_pvo); PMAP_PVO_CHECK(source_pvo); return (1); } static int pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) { struct pte *pt; int i; /* * First try primary hash. */ for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi &= ~PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } /* * Now try secondary hash. */ ptegidx ^= pmap_pteg_mask; ptegidx++; for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi |= PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } panic("pmap_pte_insert: overflow"); return (-1); } static boolean_t pmap_query_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; if (pmap_attr_fetch(m) & ptebit) return (TRUE); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if we saved the bit off. If so, cache it and return * success. */ if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } /* * No luck, now go through the hard part of looking at the PTEs * themselves. Sync so that any pending REF/CHG bits are flushed to * the PTEs. 
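 *
 * The referenced/changed state of a managed page can live in three
 * places: the per-page attribute cache (pmap_attr_fetch/save), the
 * pte_lo image cached in each pvo, and the real PTE in the hash
 * table.  The checks above covered the first two; only when neither
 * cached copy has the bit do we pay for the SYNC() and read the
 * hardware PTEs back with pmap_pte_synch() below.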
*/ SYNC(); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if this pvo has a valid PTE. if so, fetch the * REF/CHG bits from the valid PTE. If the appropriate * ptebit is set, cache it and return success. */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } } return (TRUE); } static boolean_t pmap_clear_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; int rv; /* * Clear the cached value. */ rv = pmap_attr_fetch(m); pmap_attr_clear(m, ptebit); /* * Sync so that any pending REF/CHG bits are flushed to the PTEs (so * we can reset the right ones). note that since the pvo entries and * list heads are accessed via BAT0 and are never placed in the page * table, we don't have to worry about further accesses setting the * REF/CHG bits. */ SYNC(); /* * For each pvo entry, clear the pvo's ptebit. If this pvo has a * valid pte clear the ptebit from the valid pte. */ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); } rv |= pvo->pvo_pte.pte_lo; pvo->pvo_pte.pte_lo &= ~ptebit; PMAP_PVO_CHECK(pvo); /* sanity check */ } return ((rv & ptebit) != 0); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, tmpva, offset; pa = trunc_page(pa); offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); GIANT_REQUIRED; va = kmem_alloc_pageable(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { pmap_kenter(tmpva, pa); TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } return ((void *)(va + offset)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { vm_offset_t base, offset; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); kmem_free(kernel_map, base, size); } Index: head/sys/sparc64/sparc64/pmap.c =================================================================== --- head/sys/sparc64/sparc64/pmap.c (revision 101345) +++ head/sys/sparc64/sparc64/pmap.c (revision 101346) @@ -1,1797 +1,1790 @@ /* * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD$ */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include "opt_msgbuf.h" #include "opt_pmap.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif struct mem_region { vm_offset_t mr_start; vm_offset_t mr_size; }; struct ofw_map { vm_offset_t om_start; vm_offset_t om_size; u_long om_tte; }; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; /* * Physical addresses of first and last available physical page. */ vm_offset_t avail_start; vm_offset_t avail_end; int pmap_pagedaemon_waken; /* * Map of physical memory reagions. */ vm_offset_t phys_avail[128]; static struct mem_region mra[128]; static struct ofw_map translations[128]; static int translations_size; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * Kernel pmap. */ struct pmap kernel_pmap_store; static boolean_t pmap_initialized = FALSE; /* * Allocate physical memory for use in pmap_bootstrap. 
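 *
 * pmap_bootstrap_alloc() (defined later in this file) hands out
 * page-aligned chunks straight from phys_avail[] before the VM system
 * is up.  A stripped-down sketch of that style of allocator, with the
 * name purely illustrative and alignment and region-splitting
 * omitted:
 *
 *	static vm_offset_t
 *	early_alloc(vm_size_t size)
 *	{
 *		vm_offset_t pa;
 *		int i;
 *
 *		size = round_page(size);
 *		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 *			if (phys_avail[i + 1] - phys_avail[i] < size)
 *				continue;
 *			pa = phys_avail[i];
 *			phys_avail[i] += size;
 *			return (pa);
 *		}
 *		panic("early_alloc: out of memory");
 *	}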
*/ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size); static vm_offset_t pmap_map_direct(vm_page_t m); /* * If user pmap is processed with pmap_remove and with pmap_remove and the * resident count drops to 0, there are no more pages to remove, so we * need not continue. */ #define PMAP_REMOVE_DONE(pm) \ ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0) /* * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove() * and pmap_protect() instead of trying each virtual address. */ #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE) #ifdef PMAP_STATS static long pmap_enter_nupdate; static long pmap_enter_nreplace; static long pmap_enter_nnew; static long pmap_ncache_enter; static long pmap_ncache_enter_c; static long pmap_ncache_enter_cc; static long pmap_ncache_enter_nc; static long pmap_ncache_remove; static long pmap_ncache_remove_c; static long pmap_ncache_remove_cc; static long pmap_ncache_remove_nc; static long pmap_niflush; SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "Statistics"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nupdate, CTLFLAG_RD, &pmap_enter_nupdate, 0, "Number of pmap_enter() updates"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nreplace, CTLFLAG_RD, &pmap_enter_nreplace, 0, "Number of pmap_enter() replacements"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nnew, CTLFLAG_RD, &pmap_enter_nnew, 0, "Number of pmap_enter() additions"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter, CTLFLAG_RD, &pmap_ncache_enter, 0, "Number of pmap_cache_enter() calls"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_c, CTLFLAG_RD, &pmap_ncache_enter_c, 0, "Number of pmap_cache_enter() cacheable"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_cc, CTLFLAG_RD, &pmap_ncache_enter_cc, 0, "Number of pmap_cache_enter() change color"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_nc, CTLFLAG_RD, &pmap_ncache_enter_nc, 0, "Number of pmap_cache_enter() noncacheable"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove, CTLFLAG_RD, &pmap_ncache_remove, 0, "Number of pmap_cache_remove() calls"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_c, CTLFLAG_RD, &pmap_ncache_remove_c, 0, "Number of pmap_cache_remove() cacheable"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_cc, CTLFLAG_RD, &pmap_ncache_remove_cc, 0, "Number of pmap_cache_remove() change color"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_nc, CTLFLAG_RD, &pmap_ncache_remove_nc, 0, "Number of pmap_cache_remove() noncacheable"); SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_niflush, CTLFLAG_RD, &pmap_niflush, 0, "Number of pmap I$ flushes"); #define PMAP_STATS_INC(var) atomic_add_long(&var, 1) #else #define PMAP_STATS_INC(var) #endif /* * Quick sort callout for comparing memory regions. */ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *mra; const struct mem_region *mrb; mra = a; mrb = b; if (mra->mr_start < mrb->mr_start) return (-1); else if (mra->mr_start > mrb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *oma; const struct ofw_map *omb; oma = a; omb = b; if (oma->om_start < omb->om_start) return (-1); else if (oma->om_start > omb->om_start) return (1); else return (0); } /* * Bootstrap the system enough to run with virtual memory. 
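 *
 * In outline, the routine below: pulls the "available" property of
 * /memory from the firmware to build phys_avail[] and physmem;
 * allocates the kernel TSB and locks its 4MB pages into the TLB via
 * pmap_map_tsb(); enters plain 8K TTEs covering the kernel's locked
 * 4MB mappings so that pmap_kextract() works on them; sets up
 * thread0's kernel stack and the message buffer; copies the
 * firmware's "translations" into the TSB; and finally initializes the
 * contexts of the statically allocated kernel pmap.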
*/ void pmap_bootstrap(vm_offset_t ekva) { struct pmap *pm; struct tte *tp; vm_offset_t off; vm_offset_t pa; vm_offset_t va; vm_size_t physsz; ihandle_t pmem; ihandle_t vmem; int sz; int i; int j; /* * Set the start and end of kva. The kernel is loaded at the first * available 4 meg super page, so round up to the end of the page. */ virtual_avail = roundup2(ekva, PAGE_SIZE_4M); virtual_end = VM_MAX_KERNEL_ADDRESS; /* * Find out what physical memory is available from the prom and * initialize the phys_avail array. This must be done before * pmap_bootstrap_alloc is called. */ if ((pmem = OF_finddevice("/memory")) == -1) panic("pmap_bootstrap: finddevice /memory"); if ((sz = OF_getproplen(pmem, "available")) == -1) panic("pmap_bootstrap: getproplen /memory/available"); if (sizeof(phys_avail) < sz) panic("pmap_bootstrap: phys_avail too small"); if (sizeof(mra) < sz) panic("pmap_bootstrap: mra too small"); bzero(mra, sz); if (OF_getprop(pmem, "available", mra, sz) == -1) panic("pmap_bootstrap: getprop /memory/available"); sz /= sizeof(*mra); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(mra, sz, sizeof (*mra), mr_cmp); physsz = 0; for (i = 0, j = 0; i < sz; i++, j += 2) { CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start, mra[i].mr_size); phys_avail[j] = mra[i].mr_start; phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size; physsz += mra[i].mr_size; } physmem = btoc(physsz); /* * Allocate the kernel tsb and lock it in the tlb. */ pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M); if (pa & PAGE_MASK_4M) panic("pmap_bootstrap: tsb unaligned\n"); tsb_kernel_phys = pa; tsb_kernel = (struct tte *)virtual_avail; virtual_avail += KVA_PAGES * PAGE_SIZE_4M; pmap_map_tsb(); bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M); /* * Enter fake 8k pages for the 4MB kernel pages, so that * pmap_kextract() will work for them. */ for (i = 0; i < kernel_tlb_slots; i++) { pa = kernel_tlbs[i].te_pa; va = kernel_tlbs[i].te_va; for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) { tp = tsb_kvtotte(va + off); tp->tte_vpn = TV_VPN(va + off); tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; } } /* * Allocate a kernel stack with guard page for thread0 and map it into * the kernel tsb. */ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE); kstack0_phys = pa; kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; tp = tsb_kvtotte(va); tp->tte_vpn = TV_VPN(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; } /* * Allocate the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE); /* * Add the prom mappings to the kernel tsb. 
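 * These come from the /virtual-memory node's "translations" property.
 * Only translations inside the PROM address range (VM_MIN_PROM_ADDRESS ...
 * VM_MAX_PROM_ADDRESS) are entered; the tte data is taken directly from the
 * firmware and advanced page by page across each translation.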
*/ if ((vmem = OF_finddevice("/virtual-memory")) == -1) panic("pmap_bootstrap: finddevice /virtual-memory"); if ((sz = OF_getproplen(vmem, "translations")) == -1) panic("pmap_bootstrap: getproplen translations"); if (sizeof(translations) < sz) panic("pmap_bootstrap: translations too small"); bzero(translations, sz); if (OF_getprop(vmem, "translations", translations, sz) == -1) panic("pmap_bootstrap: getprop /virtual-memory/translations"); sz /= sizeof(*translations); translations_size = sz; CTR0(KTR_PMAP, "pmap_bootstrap: translations"); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: start=%#lx size=%#lx tte=%#lx", translations[i].om_start, translations[i].om_size, translations[i].om_tte); if (translations[i].om_start < VM_MIN_PROM_ADDRESS || translations[i].om_start > VM_MAX_PROM_ADDRESS) continue; for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) { va = translations[i].om_start + off; tp = tsb_kvtotte(va); tp->tte_vpn = TV_VPN(va); tp->tte_data = translations[i].om_tte + off; } } /* * Calculate the first and last available physical addresses. */ avail_start = phys_avail[0]; for (i = 0; phys_avail[i + 2] != 0; i += 2) ; avail_end = phys_avail[i + 1]; Maxmem = sparc64_btop(avail_end); /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize the kernel pmap (which is statically allocated). */ pm = kernel_pmap; for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = TLB_CTX_KERNEL; pm->pm_active = ~0; /* XXX flush all non-locked tlb entries */ } void pmap_map_tsb(void) { vm_offset_t va; vm_offset_t pa; u_long data; u_long s; int i; s = intr_disable(); /* * Map the 4mb tsb pages. */ for (i = 0; i < KVA_PAGES; i++) { va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M; pa = tsb_kernel_phys + i * PAGE_SIZE_4M; /* XXX - cheetah */ data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV | TD_P | TD_W; stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(TLB_CTX_KERNEL)); stxa_sync(0, ASI_DTLB_DATA_IN_REG, data); } /* * Load the tsb registers. */ stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel); stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel); membar(Sync); flush(tsb_kernel); /* * Set the secondary context to be the kernel context (needed for * fp block operations in the kernel and the cache code). */ stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL); membar(Sync); intr_restore(s); } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. 
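 * For example, the kernel tsb and thread0's kernel stack are carved out
 * this way earlier in pmap_bootstrap():
 *
 *	pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M);
 *	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);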
*/ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size) { vm_offset_t pa; int i; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (phys_avail[i + 1] - phys_avail[i] < size) continue; pa = phys_avail[i]; phys_avail[i] += size; return (pa); } panic("pmap_bootstrap_alloc"); } void pmap_context_rollover(void) { u_long data; u_long tag; int i; mtx_assert(&sched_lock, MA_OWNED); CTR0(KTR_PMAP, "pmap_context_rollover"); for (i = 0; i < tlb_slot_count; i++) { /* XXX - cheetah */ data = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG); tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG); if ((data & TD_V) != 0 && (data & TD_L) == 0 && TLB_TAR_CTX(tag) != TLB_CTX_KERNEL) stxa_sync(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0); data = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG); tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG); if ((data & TD_V) != 0 && (data & TD_L) == 0 && TLB_TAR_CTX(tag) != TLB_CTX_KERNEL) stxa_sync(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0); } PCPU_SET(tlb_ctx, PCPU_GET(tlb_ctx_min)); } static __inline u_int pmap_context_alloc(void) { u_int context; mtx_assert(&sched_lock, MA_OWNED); context = PCPU_GET(tlb_ctx); if (context + 1 == PCPU_GET(tlb_ctx_max)) pmap_context_rollover(); else PCPU_SET(tlb_ctx, context + 1); return (context); } /* * Initialize the pmap module. */ void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { vm_offset_t addr; vm_size_t size; int result; int i; for (i = 0; i < vm_page_array_size; i++) { vm_page_t m; m = &vm_page_array[i]; STAILQ_INIT(&m->md.tte_list); m->md.flags = 0; m->md.color = 0; } for (i = 0; i < translations_size; i++) { addr = translations[i].om_start; size = translations[i].om_size; if (addr < 0xf0000000) /* XXX */ continue; result = vm_map_find(kernel_map, NULL, 0, &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0); if (result != KERN_SUCCESS || addr != translations[i].om_start) panic("pmap_init: vm_map_find"); } pmap_initialized = TRUE; } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2(void) { } /* * Extract the physical page address associated with the given * map/virtual_address pair. */ vm_offset_t pmap_extract(pmap_t pm, vm_offset_t va) { struct tte *tp; if (pm == kernel_pmap) return (pmap_kextract(va)); tp = tsb_tte_lookup(pm, va); if (tp == NULL) return (0); else return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp))); } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct tte *tp; tp = tsb_kvtotte(va); if ((tp->tte_data & TD_V) == 0) return (0); return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp))); } int pmap_cache_enter(vm_page_t m, vm_offset_t va) { struct tte *tp; int color; PMAP_STATS_INC(pmap_ncache_enter); /* * Find the color for this virtual address and note the added mapping. */ color = DCACHE_COLOR(va); m->md.colors[color]++; /* * If all existing mappings have the same color, the mapping is * cacheable. */ if (m->md.color == color) { KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0, ("pmap_cache_enter: cacheable, mappings of other color")); PMAP_STATS_INC(pmap_ncache_enter_c); return (1); } /* * If there are no mappings of the other color, and the page still has * the wrong color, this must be a new mapping. Change the color to * match the new mapping, which is cacheable. We must flush the page * from the cache now. 
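 * (The flush is needed because the data cache is virtually indexed; lines
 * brought in under the old color would otherwise alias with accesses made
 * through the new, differently colored mapping.)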
*/ if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) { KASSERT(m->md.colors[color] == 1, ("pmap_cache_enter: changing color, not new mapping")); dcache_page_inval(VM_PAGE_TO_PHYS(m)); m->md.color = color; PMAP_STATS_INC(pmap_ncache_enter_cc); return (1); } PMAP_STATS_INC(pmap_ncache_enter_nc); /* * If the mapping is already non-cacheable, just return. */ if (m->md.color == -1) return (0); /* * Mark all mappings as uncacheable, flush any lines with the other * color out of the dcache, and set the color to none (-1). */ STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { tp->tte_data &= ~TD_CV; tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } dcache_page_inval(VM_PAGE_TO_PHYS(m)); m->md.color = -1; return (0); } void pmap_cache_remove(vm_page_t m, vm_offset_t va) { struct tte *tp; int color; CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va, m->md.colors[DCACHE_COLOR(va)]); KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0, ("pmap_cache_remove: no mappings %d <= 0", m->md.colors[DCACHE_COLOR(va)])); PMAP_STATS_INC(pmap_ncache_remove); /* * Find the color for this virtual address and note the removal of * the mapping. */ color = DCACHE_COLOR(va); m->md.colors[color]--; /* * If the page is cacheable, just return and keep the same color, even * if there are no longer any mappings. */ if (m->md.color != -1) { PMAP_STATS_INC(pmap_ncache_remove_c); return; } KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0, ("pmap_cache_remove: uncacheable, no mappings of other color")); /* * If the page is not cacheable (color is -1), and the number of * mappings for this color is not zero, just return. There are * mappings of the other color still, so remain non-cacheable. */ if (m->md.colors[color] != 0) { PMAP_STATS_INC(pmap_ncache_remove_nc); return; } PMAP_STATS_INC(pmap_ncache_remove_cc); /* * The number of mappings for this color is now zero. Recache the * other colored mappings, and change the page color to the other * color. There should be no lines in the data cache for this page, * so flushing should not be needed. */ STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { tp->tte_data |= TD_CV; tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } m->md.color = DCACHE_OTHER_COLOR(color); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { vm_offset_t ova; struct tte *tp; vm_page_t om; vm_page_t m; u_long data; tp = tsb_kvtotte(va); m = PHYS_TO_VM_PAGE(pa); CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx", va, pa, tp, tp->tte_data); if ((tp->tte_data & TD_V) != 0) { om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); ova = TTE_GET_VA(tp); STAILQ_REMOVE(&om->md.tte_list, tp, tte, tte_link); pmap_cache_remove(om, ova); if (va != ova) tlb_page_demap(kernel_pmap, ova); } data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_P | TD_W; if (pmap_cache_enter(m, va) != 0) data |= TD_CV; tp->tte_vpn = TV_VPN(va); tp->tte_data = data; STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); tp->tte_pmap = kernel_pmap; } /* * Map a wired page into kernel virtual address space. This additionally * takes a flag argument which is or'ed to the TTE data. This is used by * bus_space_map(). * NOTE: if the mapping is non-cacheable, it's the caller's responsibility * to flush entries that might still be in the cache, if applicable.
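 * Unlike pmap_kenter(), no tte list or cache color bookkeeping is done
 * here, so mappings entered this way are invisible to pmap_page_protect()
 * and the other page-based operations.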
*/ void pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags) { struct tte *tp; tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx", va, pa, tp, tp->tte_data); tp->tte_vpn = TV_VPN(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags; } /* * Make a temporary mapping for a physical address. This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_offset_t pa, int i) { TODO; } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { struct tte *tp; vm_page_t m; tp = tsb_kvtotte(va); CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, tp->tte_data); m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link); pmap_cache_remove(m, va); TTE_ZERO(tp); } /* * Inverse of pmap_kenter_flags, used by bus_space_unmap(). */ void pmap_kremove_flags(vm_offset_t va) { struct tte *tp; tp = tsb_kvtotte(va); CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, tp->tte_data); TTE_ZERO(tp); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { struct tte *tp; vm_offset_t sva; vm_offset_t va; vm_offset_t pa; pa = pa_start; sva = *virt; va = sva; for (; pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) { tp = tsb_kvtotte(va); tp->tte_vpn = TV_VPN(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; } tlb_range_demap(kernel_pmap, sva, sva + (pa_end - pa_start) - 1); *virt = va; return (sva); } static vm_offset_t pmap_map_direct(vm_page_t m) { vm_offset_t pa; vm_offset_t va; pa = VM_PAGE_TO_PHYS(m); if (m->md.color == -1) { KASSERT(m->md.colors[0] != 0 && m->md.colors[1] != 0, ("pmap_map_direct: non-cacheable, only 1 color")); va = TLB_DIRECT_MASK | pa | TLB_DIRECT_UNCACHEABLE; } else { KASSERT(m->md.colors[DCACHE_OTHER_COLOR(m->md.color)] == 0, ("pmap_map_direct: cacheable, mappings of other color")); va = TLB_DIRECT_MASK | pa | (m->md.color << TLB_DIRECT_COLOR_SHIFT); } return (va << TLB_DIRECT_SHIFT); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) { vm_offset_t va; int i; va = sva; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1); } /* * As above, but take an additional flags argument and call * pmap_kenter_flags(). */ void pmap_qenter_flags(vm_offset_t sva, vm_page_t *m, int count, u_long fl) { vm_offset_t va; int i; va = sva; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter_flags(va, VM_PAGE_TO_PHYS(m[i]), fl); tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. 
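 * Callers pair the two, as pmap_new_thread() and pmap_dispose_thread() do
 * for kernel stacks:
 *
 *	pmap_qenter(ks, ma, KSTACK_PAGES);
 *	...
 *	pmap_qremove(ks, KSTACK_PAGES);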
*/ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; int i; va = sva; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kremove(va); tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1); } /* * Create the kernel stack and pcb for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. */ void pmap_new_thread(struct thread *td) { vm_page_t ma[KSTACK_PAGES]; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; u_int i; /* * Allocate object for the kstack, */ ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; /* * Get a kernel virtual address for the kstack for this thread. */ ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); if (ks == 0) panic("pmap_new_thread: kstack allocation failed"); if (KSTACK_GUARD_PAGES != 0) { tlb_page_demap(kernel_pmap, ks); ks += KSTACK_GUARD_PAGES * PAGE_SIZE; } td->td_kstack = ks; for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page. */ - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); + m = vm_page_grab(ksobj, i, + VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); ma[i] = m; - /* - * Wire the page. - */ - m->wire_count++; - cnt.v_wire_count++; - vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } /* * Enter the page into the kernel address space. */ pmap_qenter(ks, ma, KSTACK_PAGES); } /* * Dispose the kernel stack for a thread that has exited. * This routine directly impacts the exit perf of a process and thread. */ void pmap_dispose_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int i; ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_dispose_thread: kstack already missing?"); vm_page_lock_queues(); vm_page_busy(m); vm_page_unwire(m, 0); vm_page_free(m); vm_page_unlock_queues(); } pmap_qremove(ks, KSTACK_PAGES); kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE), (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); vm_object_deallocate(ksobj); } /* * Allow the kernel stack for a thread to be prejudicially paged out. */ void pmap_swapout_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int i; ksobj = td->td_kstack_obj; ks = (vm_offset_t)td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_swapout_thread: kstack already missing?"); vm_page_lock_queues(); vm_page_dirty(m); vm_page_unwire(m, 0); vm_page_unlock_queues(); } pmap_qremove(ks, KSTACK_PAGES); } /* * Bring the kernel stack for a specified thread back in. */ void pmap_swapin_thread(struct thread *td) { vm_page_t ma[KSTACK_PAGES]; vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int rv; int i; ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); if (m->valid != VM_PAGE_BITS_ALL) { rv = vm_pager_get_pages(ksobj, &m, 1, 0); if (rv != VM_PAGER_OK) panic("pmap_swapin_thread: cannot get kstack"); m = vm_page_lookup(ksobj, i); m->valid = VM_PAGE_BITS_ALL; } ma[i] = m; vm_page_lock_queues(); vm_page_wire(m); vm_page_wakeup(m); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); vm_page_unlock_queues(); } pmap_qenter(ks, ma, KSTACK_PAGES); } /* * Initialize the pmap associated with process 0. 
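 * proc0 runs entirely in the kernel, so its pmap gets no tsb or tsb object
 * and its per-cpu contexts are simply zeroed; user pmaps get a tsb from
 * pmap_pinit() and a context from pmap_activate().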
*/ void pmap_pinit0(pmap_t pm) { int i; for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = 0; pm->pm_active = 0; pm->pm_tsb = NULL; pm->pm_tsb_obj = NULL; bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Initialize a preallocated and zeroed pmap structure, such as one in a * vmspace structure. */ void pmap_pinit(pmap_t pm) { vm_page_t ma[TSB_PAGES]; vm_page_t m; int i; /* * Allocate kva space for the tsb. */ if (pm->pm_tsb == NULL) { pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map, TSB_BSIZE); } /* * Allocate an object for it. */ if (pm->pm_tsb_obj == NULL) pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES); for (i = 0; i < TSB_PAGES; i++) { m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_RETRY | VM_ALLOC_ZERO); if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->wire_count++; cnt.v_wire_count++; vm_page_flag_clear(m, PG_MAPPED | PG_BUSY); m->valid = VM_PAGE_BITS_ALL; ma[i] = m; } pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES); for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = -1; pm->pm_active = 0; bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } void pmap_pinit2(pmap_t pmap) { /* XXX: Remove this stub when no longer called */ } /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pm) { vm_object_t obj; vm_page_t m; CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb); obj = pm->pm_tsb_obj; KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1")); KASSERT(pmap_resident_count(pm) == 0, ("pmap_release: resident pages %ld != 0", pmap_resident_count(pm))); while (!TAILQ_EMPTY(&obj->memq)) { m = TAILQ_FIRST(&obj->memq); if (vm_page_sleep_busy(m, FALSE, "pmaprl")) continue; vm_page_busy(m); KASSERT(m->hold_count == 0, ("pmap_release: freeing held tsb page")); m->wire_count--; cnt.v_wire_count--; vm_page_free_zero(m); } pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } /* * This routine is very drastic, but can save the system * in a pinch. */ void pmap_collect(void) { } int pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va) { vm_page_t m; m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link); if ((tp->tte_data & TD_WIRED) != 0) pm->pm_stats.wired_count--; if ((tp->tte_data & TD_PV) != 0) { if ((tp->tte_data & TD_W) != 0 && pmap_track_modified(pm, va)) vm_page_dirty(m); if ((tp->tte_data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); if (STAILQ_EMPTY(&m->md.tte_list)) vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); pm->pm_stats.resident_count--; } pmap_cache_remove(m, va); TTE_ZERO(tp); if (PMAP_REMOVE_DONE(pm)) return (0); return (1); } /* * Remove the given range of addresses from the specified map.
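 * For ranges larger than PMAP_TSB_THRESH it is cheaper to sweep the whole
 * tsb with tsb_foreach() and demap the entire context than to look up and
 * demap each page individually; smaller ranges are handled page by page.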
*/ void pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) { struct tte *tp; vm_offset_t va; CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", pm->pm_context[PCPU_GET(cpuid)], start, end); if (PMAP_REMOVE_DONE(pm)) return; if (end - start > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, start, end, pmap_remove_tte); tlb_context_demap(pm); } else { for (va = start; va < end; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) { if (!pmap_remove_tte(pm, NULL, tp, va)) break; } } tlb_range_demap(pm, start, end - 1); } } void pmap_remove_all(vm_page_t m) { struct pmap *pm; struct tte *tpn; struct tte *tp; vm_offset_t va; KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0, ("pv_remove_all: illegal for unmanaged page %#lx", VM_PAGE_TO_PHYS(m))); for (tp = STAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) { tpn = STAILQ_NEXT(tp, tte_link); if ((tp->tte_data & TD_PV) == 0) continue; pm = TTE_GET_PMAP(tp); va = TTE_GET_VA(tp); if ((tp->tte_data & TD_WIRED) != 0) pm->pm_stats.wired_count--; if ((tp->tte_data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); if ((tp->tte_data & TD_W) != 0 && pmap_track_modified(pm, va)) vm_page_dirty(m); tp->tte_data &= ~TD_V; tlb_page_demap(pm, va); STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link); pm->pm_stats.resident_count--; pmap_cache_remove(m, va); TTE_ZERO(tp); } vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); } int pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va) { vm_page_t m; if ((tp->tte_data & TD_PV) != 0) { m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); if ((tp->tte_data & TD_REF) != 0) { vm_page_flag_set(m, PG_REFERENCED); tp->tte_data &= ~TD_REF; } if ((tp->tte_data & TD_W) != 0 && pmap_track_modified(pm, va)) { vm_page_dirty(m); } } tp->tte_data &= ~(TD_W | TD_SW); return (0); } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va; struct tte *tp; CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } if (prot & VM_PROT_WRITE) return; if (eva - sva > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte); tlb_context_demap(pm); } else { for (va = sva; va < eva; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) pmap_protect_tte(pm, NULL, tp, va); } tlb_range_demap(pm, sva, eva - 1); } } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ void pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct tte *tp; vm_offset_t pa; u_long data; pa = VM_PAGE_TO_PHYS(m); CTR6(KTR_PMAP, "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d", pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired); /* * If there is an existing mapping, and the physical address has not * changed, must be protection or wiring change. */ if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) { CTR0(KTR_PMAP, "pmap_enter: update"); PMAP_STATS_INC(pmap_enter_nupdate); /* * Wiring change, just update stats. 
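 * Only the TD_WIRED bit and pm_stats.wired_count change here; the
 * protection and execute bits are rebuilt from 'prot' below.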
*/ if (wired) { if ((tp->tte_data & TD_WIRED) == 0) { tp->tte_data |= TD_WIRED; pm->pm_stats.wired_count++; } } else { if ((tp->tte_data & TD_WIRED) != 0) { tp->tte_data &= ~TD_WIRED; pm->pm_stats.wired_count--; } } /* * Save the old bits and clear the ones we're interested in. */ data = tp->tte_data; tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W); /* * If we're keeping write permission, re-enable the software write bit; * otherwise sense modify status. */ if ((prot & VM_PROT_WRITE) != 0) { tp->tte_data |= TD_SW; if (wired) { tp->tte_data |= TD_W; } } else if ((data & TD_W) != 0 && pmap_track_modified(pm, va)) { vm_page_dirty(m); } /* * If we're turning on execute permissions, flush the icache. */ if ((prot & VM_PROT_EXECUTE) != 0) { if ((data & TD_EXEC) == 0) { PMAP_STATS_INC(pmap_niflush); icache_page_inval(pa); } tp->tte_data |= TD_EXEC; } /* * Delete the old mapping. */ tlb_page_demap(pm, TTE_GET_VA(tp)); } else { /* * If there is an existing mapping, but it's for a different * physical address, delete the old mapping. */ if (tp != NULL) { CTR0(KTR_PMAP, "pmap_enter: replace"); PMAP_STATS_INC(pmap_enter_nreplace); pmap_remove_tte(pm, NULL, tp, va); tlb_page_demap(pm, va); } else { CTR0(KTR_PMAP, "pmap_enter: new"); PMAP_STATS_INC(pmap_enter_nnew); } /* * Now set up the data and install the new mapping. */ data = TD_V | TD_8K | TD_PA(pa) | TD_CP; if (pm == kernel_pmap) data |= TD_P; if (prot & VM_PROT_WRITE) data |= TD_SW; if (prot & VM_PROT_EXECUTE) { data |= TD_EXEC; PMAP_STATS_INC(pmap_niflush); icache_page_inval(pa); } /* * If it's wired, update stats. We also don't need reference or * modify tracking for wired mappings, so set the bits now. */ if (wired) { pm->pm_stats.wired_count++; data |= TD_REF | TD_WIRED; if ((prot & VM_PROT_WRITE) != 0) data |= TD_W; } tsb_tte_enter(pm, m, va, data); } } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { /* XXX */ } void pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry) { /* XXX */ } /* * Change the wiring attribute for a map/virtual-address pair. * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct tte *tp; if ((tp = tsb_tte_lookup(pm, va)) != NULL) { if (wired) { if ((tp->tte_data & TD_WIRED) == 0) pm->pm_stats.wired_count++; tp->tte_data |= TD_WIRED; } else { if ((tp->tte_data & TD_WIRED) != 0) pm->pm_stats.wired_count--; tp->tte_data &= ~TD_WIRED; } } } static int pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va) { vm_page_t m; u_long data; if (tsb_tte_lookup(dst_pmap, va) == NULL) { data = tp->tte_data & ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W); m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); tsb_tte_enter(dst_pmap, m, va, data); } return (1); } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { struct tte *tp; vm_offset_t va; if (dst_addr != src_addr) return; if (len > PMAP_TSB_THRESH) { tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, pmap_copy_tte); tlb_context_demap(dst_pmap); } else { for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL) pmap_copy_tte(src_pmap, dst_pmap, tp, va); } tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1); } } /* * Zero a page of physical memory by temporarily mapping it into the tlb.
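 * The address comes from pmap_map_direct(), which encodes the physical
 * address and the page's cache color into a direct-mapped virtual address,
 * so no kva allocation or tsb entry is needed.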
*/ void pmap_zero_page(vm_page_t m) { vm_offset_t va; va = pmap_map_direct(m); CTR2(KTR_PMAP, "pmap_zero_page: pa=%#lx va=%#lx", VM_PAGE_TO_PHYS(m), va); bzero((void *)va, PAGE_SIZE); } void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va; KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size")); va = pmap_map_direct(m); CTR4(KTR_PMAP, "pmap_zero_page_area: pa=%#lx va=%#lx off=%#x size=%#x", VM_PAGE_TO_PHYS(m), va, off, size); bzero((void *)(va + off), size); } void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va; va = pmap_map_direct(m); CTR2(KTR_PMAP, "pmap_zero_page_idle: pa=%#lx va=%#lx", VM_PAGE_TO_PHYS(m), va); bzero((void *)va, PAGE_SIZE); } /* * Copy a page of physical memory by temporarily mapping it into the tlb. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; src = pmap_map_direct(msrc); dst = pmap_map_direct(mdst); CTR4(KTR_PMAP, "pmap_copy_page: src=%#lx va=%#lx dst=%#lx va=%#lx", VM_PAGE_TO_PHYS(msrc), src, VM_PAGE_TO_PHYS(mdst), dst); bcopy((void *)src, (void *)dst, PAGE_SIZE); } /* * Make the specified page pageable (or not). Unneeded. */ void pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pm, vm_page_t m) { struct tte *tp; int loops; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (FALSE); loops = 0; STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if (TTE_GET_PMAP(tp) == pm) return (TRUE); if (++loops >= 16) break; } return (FALSE); } /* * Remove all pages from the specified address space; this aids process exit * speeds. This is much faster than pmap_remove in the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) pmap_clear_write(m); else pmap_remove_all(m); } } vm_offset_t pmap_phys_address(int ppn) { return (sparc64_ptob(ppn)); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages.
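 * The loop below rotates the page's tte list as it goes, so that repeated
 * calls examine different mappings first, and it stops once more than four
 * reference bits have been found and cleared.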
*/ int pmap_ts_referenced(vm_page_t m) { struct tte *tpf; struct tte *tpn; struct tte *tp; int count; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (0); count = 0; if ((tp = STAILQ_FIRST(&m->md.tte_list)) != NULL) { tpf = tp; do { tpn = STAILQ_NEXT(tp, tte_link); STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link); STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); if ((tp->tte_data & TD_PV) == 0 || !pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp))) continue; if ((tp->tte_data & TD_REF) != 0) { tp->tte_data &= ~TD_REF; if (++count > 4) break; } } while ((tp = tpn) != NULL && tp != tpf); } return (count); } boolean_t pmap_is_modified(vm_page_t m) { struct tte *tp; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (FALSE); STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0 || !pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp))) continue; if ((tp->tte_data & TD_W) != 0) return (TRUE); } return (FALSE); } void pmap_clear_modify(vm_page_t m) { struct tte *tp; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if ((tp->tte_data & TD_W) != 0) { tp->tte_data &= ~TD_W; tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } } void pmap_clear_reference(vm_page_t m) { struct tte *tp; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if ((tp->tte_data & TD_REF) != 0) { tp->tte_data &= ~TD_REF; tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } } void pmap_clear_write(vm_page_t m) { struct tte *tp; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if ((tp->tte_data & (TD_SW | TD_W)) != 0) { if ((tp->tte_data & TD_W) != 0 && pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp))) vm_page_dirty(m); tp->tte_data &= ~(TD_SW | TD_W); tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } } int pmap_mincore(pmap_t pm, vm_offset_t addr) { TODO; return (0); } /* * Activate a user pmap. The pmap must be activated before its address space * can be accessed in any way. */ void pmap_activate(struct thread *td) { struct vmspace *vm; vm_offset_t tsb; u_long context; pmap_t pm; /* * Load all the data we need up front to encourage the compiler to * not issue any loads while we have interrupts disabled below. */ vm = td->td_proc->p_vmspace; pm = &vm->vm_pmap; tsb = (vm_offset_t)pm->pm_tsb; KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?")); KASSERT(pm->pm_context[PCPU_GET(cpuid)] != 0, ("pmap_activate: activating nucleus context?")); mtx_lock_spin(&sched_lock); wrpr(pstate, 0, PSTATE_MMU); mov(tsb, TSB_REG); wrpr(pstate, 0, PSTATE_KERNEL); context = pmap_context_alloc(); pm->pm_context[PCPU_GET(cpuid)] = context; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(vmspace, vm); stxa(AA_DMMU_PCXR, ASI_DMMU, context); membar(Sync); mtx_unlock_spin(&sched_lock); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); }