Index: stable/6/sys/alpha/alpha/pmap.c
===================================================================
--- stable/6/sys/alpha/alpha/pmap.c	(revision 173366)
+++ stable/6/sys/alpha/alpha/pmap.c	(revision 173367)
@@ -1,2802 +1,2800 @@
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
 *	from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *	with some ideas from NetBSD's alpha pmap
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps. These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

/*
 * Notes for alpha pmap.
* * On alpha, pm_pdeobj will hold lev1, lev2 and lev3 page tables. * Indices from 0 to NUSERLEV3MAPS-1 will map user lev3 page tables, * indices from NUSERLEV3MAPS to NUSERLEV3MAPS+NUSERLEV2MAPS-1 will * map user lev2 page tables and index NUSERLEV3MAPS+NUSERLEV2MAPS * will map the lev1 page table. The lev1 table will self map at * address VADDR(PTLEV1I,0,0). * * The vm_object kptobj holds the kernel page tables on i386 (62 or 63 * of them, depending on whether the system is SMP). On alpha, kptobj * will hold the lev3 and lev2 page tables for K1SEG. Indices 0 to * NKLEV3MAPS-1 will map kernel lev3 page tables and indices * NKLEV3MAPS to NKLEV3MAPS+NKLEV2MAPS will map lev2 page tables. (XXX * should the kernel Lev1map be inserted into this object?). * * pvtmmap is not needed for alpha since K0SEG maps all of physical * memory. * * * alpha virtual memory map: * * * Address Lev1 index * * --------------------------------- * 0000000000000000 | | 0 * | | * | | * | | * | | * --- --- * User space (USEG) * --- --- * | | * | | * | | * | | * 000003ffffffffff | | 511=UMAXLEV1I * --------------------------------- * fffffc0000000000 | | 512=K0SEGLEV1I * | Kernel code/data/bss | * | | * | | * | | * --- --- * K0SEG * --- --- * | | * | 1-1 physical/virtual | * | | * | | * fffffdffffffffff | | * --------------------------------- * fffffe0000000000 | | 768=K1SEGLEV1I * | Kernel dynamic data | * | | * | | * | | * --- --- * K1SEG * --- --- * | | * | mapped by ptes | * | | * | | * fffffff7ffffffff | | * --------------------------------- * fffffffe00000000 | | 1023=PTLEV1I * | PTmap (pte self map) | * ffffffffffffffff | | * --------------------------------- * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if defined(DIAGNOSTIC) #define PMAP_DIAGNOSTIC #endif #if 0 #define PMAP_DIAGNOSTIC #define PMAP_DEBUG #endif #if !defined(PMAP_DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif /* * Some macros for manipulating virtual addresses */ #define ALPHA_L1SIZE (1L << ALPHA_L1SHIFT) #define ALPHA_L2SIZE (1L << ALPHA_L2SHIFT) #define alpha_l1trunc(va) ((va) & ~(ALPHA_L1SIZE-1)) #define alpha_l2trunc(va) ((va) & ~(ALPHA_L2SIZE-1)) /* * Get PDEs and PTEs for user/kernel address space */ #define pmap_pte_w(pte) ((*(pte) & PG_W) != 0) #define pmap_pte_managed(pte) ((*(pte) & PG_MANAGED) != 0) #define pmap_pte_v(pte) ((*(pte) & PG_V) != 0) #define pmap_pte_pa(pte) alpha_ptob(ALPHA_PTE_TO_PFN(*(pte))) #define pmap_pte_prot(pte) (*(pte) & PG_PROT) #define pmap_pte_set_w(pte, v) ((v)?(*pte |= PG_W):(*pte &= ~PG_W)) #define pmap_pte_set_prot(pte, v) ((*pte &= ~PG_PROT), (*pte |= (v))) /* * Given a map and a machine independent protection code, * convert to an alpha protection code. */ #define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 
0 : 1][p]) int protection_codes[2][8]; /* * Return non-zero if this pmap is currently active */ #define pmap_isactive(pmap) (pmap->pm_active) /* * Extract level 1, 2 and 3 page table indices from a va */ #define PTMASK ((1 << ALPHA_PTSHIFT) - 1) #define pmap_lev1_index(va) (((va) >> ALPHA_L1SHIFT) & PTMASK) #define pmap_lev2_index(va) (((va) >> ALPHA_L2SHIFT) & PTMASK) #define pmap_lev3_index(va) (((va) >> ALPHA_L3SHIFT) & PTMASK) /* * Given a physical address, construct a pte */ #define pmap_phys_to_pte(pa) ALPHA_PTE_FROM_PFN(alpha_btop(pa)) /* * Given a page frame number, construct a k0seg va */ #define pmap_k0seg_to_pfn(va) alpha_btop(ALPHA_K0SEG_TO_PHYS(va)) /* * Given a pte, construct a k0seg va */ #define pmap_k0seg_to_pte(va) ALPHA_PTE_FROM_PFN(pmap_k0seg_to_pfn(va)) /* * Lev1map: * * Kernel level 1 page table. This maps all kernel level 2 * page table pages, and is used as a template for all user * pmap level 1 page tables. When a new user level 1 page * table is allocated, all Lev1map PTEs for kernel addresses * are copied to the new map. * * Lev2map: * * Initial set of kernel level 2 page table pages. These * map the kernel level 3 page table pages. As kernel * level 3 page table pages are added, more level 2 page * table pages may be added to map them. These pages are * never freed. * * Lev3map: * * Initial set of kernel level 3 page table pages. These * map pages in K1SEG. More level 3 page table pages may * be added at run-time if additional K1SEG address space * is required. These pages are never freed. * * Lev2mapsize: * * Number of entries in the initial Lev2map. * * Lev3mapsize: * * Number of entries in the initial Lev3map. * * NOTE: When mappings are inserted into the kernel pmap, all * level 2 and level 3 page table pages must already be allocated * and mapped into the parent page table. 
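 *
 * As an illustration of the index macros above (a sketch only; it assumes
 * the usual 8KB-page configuration where ALPHA_L3SHIFT is 13, ALPHA_L2SHIFT
 * is 23, ALPHA_L1SHIFT is 33 and each table holds 1024 eight-byte PTEs):
 *
 *	lev1 index = (va >> 33) & 1023	selects an 8GB (ALPHA_L1SIZE) slot
 *	lev2 index = (va >> 23) & 1023	selects an 8MB (ALPHA_L2SIZE) slot
 *	lev3 index = (va >> 13) & 1023	selects a single 8KB page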
*/ pt_entry_t *Lev1map, *Lev2map, *Lev3map; vm_size_t Lev2mapsize, Lev3mapsize; /* * Statically allocated kernel pmap */ struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static int nklev3, nklev2; vm_offset_t kernel_vm_end; /* * Data for the ASN allocator */ static int pmap_maxasn; static pmap_t pmap_active[MAXCPU]; static LIST_HEAD(,pmap) allpmaps; static struct mtx allpmaps_lock; /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; int pmap_pagedaemon_waken; static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t locked_pmap); static void alpha_protection_init(void); static void pmap_changebit(vm_page_t m, int bit, boolean_t setem); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte); static int pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva); static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); static int pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va); static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m); static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t); #ifdef SMP static void pmap_invalidate_page_action(void *arg); static void pmap_invalidate_all_action(void *arg); #endif /* * Routine: pmap_lev1pte * Function: * Extract the level 1 page table entry associated * with the given map/virtual_address pair. */ static PMAP_INLINE pt_entry_t* pmap_lev1pte(pmap_t pmap, vm_offset_t va) { if (!pmap) return 0; return &pmap->pm_lev1[pmap_lev1_index(va)]; } /* * Routine: pmap_lev2pte * Function: * Extract the level 2 page table entry associated * with the given map/virtual_address pair. */ static PMAP_INLINE pt_entry_t* pmap_lev2pte(pmap_t pmap, vm_offset_t va) { pt_entry_t* l1pte; pt_entry_t* l2map; l1pte = pmap_lev1pte(pmap, va); if (!pmap_pte_v(l1pte)) return 0; l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); return &l2map[pmap_lev2_index(va)]; } /* * Routine: pmap_lev3pte * Function: * Extract the level 3 page table entry associated * with the given map/virtual_address pair. 
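 *
 * A sketch of the full walk performed via pmap_lev1pte() and
 * pmap_lev2pte() (validity checks omitted). Since all of physical memory
 * is visible through K0SEG, no temporary mapping is needed to step from
 * one level to the next:
 *
 *	l1pte = &pmap->pm_lev1[pmap_lev1_index(va)];
 *	l2map = (pt_entry_t *) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
 *	l2pte = &l2map[pmap_lev2_index(va)];
 *	l3map = (pt_entry_t *) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
 *	pte = &l3map[pmap_lev3_index(va)];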
*/ static PMAP_INLINE pt_entry_t* pmap_lev3pte(pmap_t pmap, vm_offset_t va) { pt_entry_t* l2pte; pt_entry_t* l3map; l2pte = pmap_lev2pte(pmap, va); if (!l2pte || !pmap_pte_v(l2pte)) return 0; l3map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte)); return &l3map[pmap_lev3_index(va)]; } vm_offset_t pmap_steal_memory(vm_size_t size) { vm_size_t bank_size; vm_offset_t pa, va; size = round_page(size); bank_size = phys_avail[1] - phys_avail[0]; while (size > bank_size) { int i; for (i = 0; phys_avail[i+2]; i+= 2) { phys_avail[i] = phys_avail[i+2]; phys_avail[i+1] = phys_avail[i+3]; } phys_avail[i] = 0; phys_avail[i+1] = 0; if (!phys_avail[0]) panic("pmap_steal_memory: out of memory"); bank_size = phys_avail[1] - phys_avail[0]; } pa = phys_avail[0]; phys_avail[0] += size; va = ALPHA_PHYS_TO_K0SEG(pa); bzero((caddr_t) va, size); return va; } extern pt_entry_t rom_pte; /* XXX */ extern int prom_mapped; /* XXX */ /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_offset_t ptaddr, u_int maxasn) { pt_entry_t newpte; int i; /* * Setup ASNs. PCPU_GET(next_asn) and PCPU_GET(current_asngen) are set * up already. */ pmap_maxasn = maxasn; /* * Allocate a level 1 map for the kernel. */ Lev1map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE); /* * Allocate a level 2 map for the kernel */ Lev2map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE); Lev2mapsize = PAGE_SIZE; /* * Allocate some level 3 maps for the kernel */ Lev3map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE*NKPT); Lev3mapsize = NKPT * PAGE_SIZE; /* Map all of the level 2 maps */ for (i = 0; i < howmany(Lev2mapsize, PAGE_SIZE); i++) { unsigned long pfn = pmap_k0seg_to_pfn((vm_offset_t) Lev2map) + i; newpte = ALPHA_PTE_FROM_PFN(pfn); newpte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_W; Lev1map[K1SEGLEV1I + i] = newpte; } /* Setup the mapping for the prom console */ { if (pmap_uses_prom_console()) { /* XXX save old pte so that we can remap prom if necessary */ rom_pte = *(pt_entry_t *)ptaddr & ~PG_ASM; /* XXX */ } prom_mapped = 0; /* * Actually, this code lies. The prom is still mapped, and will * remain so until the context switch after alpha_init() returns. * Printfs using the firmware before then will end up frobbing * Lev1map unnecessarily, but that's OK. */ } /* * Level 1 self mapping. * * Don't set PG_ASM since the self-mapping is different for each * address space. */ newpte = pmap_k0seg_to_pte((vm_offset_t) Lev1map); newpte |= PG_V | PG_KRE | PG_KWE; Lev1map[PTLEV1I] = newpte; /* Map all of the level 3 maps */ for (i = 0; i < howmany(Lev3mapsize, PAGE_SIZE); i++) { unsigned long pfn = pmap_k0seg_to_pfn((vm_offset_t) Lev3map) + i; newpte = ALPHA_PTE_FROM_PFN(pfn); newpte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_W; Lev2map[i] = newpte; } virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VPTBASE; /* * Initialize protection array. */ alpha_protection_init(); /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_lev1 = Lev1map; kernel_pmap->pm_active = ~0; kernel_pmap->pm_asn[alpha_pal_whami()].asn = 0; kernel_pmap->pm_asn[alpha_pal_whami()].gen = 1; TAILQ_INIT(&kernel_pmap->pm_pvlist); nklev3 = NKPT; nklev2 = 1; /* * Initialize list of pmaps. */ LIST_INIT(&allpmaps); LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); /* * Set up proc0's PCB such that the ptbr points to the right place * and has the kernel pmap's. 
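 * (The PALcode PTBR takes the page frame number of the level 1 table, not
 * a virtual address, hence the ALPHA_K0SEG_TO_PHYS()/PAGE_SHIFT conversion
 * below; e.g. for a hypothetical Lev1map at physical 0x200000 the stored
 * value would be 0x200000 >> 13 == 0x100.)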
*/ thread0.td_pcb->pcb_hw.apcb_ptbr = ALPHA_K0SEG_TO_PHYS((vm_offset_t)Lev1map) >> PAGE_SHIFT; thread0.td_pcb->pcb_hw.apcb_asn = 0; } int pmap_uses_prom_console() { int cputype; cputype = hwrpb->rpb_type; return (cputype == ST_DEC_21000 || ST_DEC_4100); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); } void pmap_init2() { } /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ static void pmap_invalidate_asn(pmap_t pmap) { pmap->pm_asn[PCPU_GET(cpuid)].gen = 0; } struct pmap_invalidate_page_arg { pmap_t pmap; vm_offset_t va; }; static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { #ifdef SMP struct pmap_invalidate_page_arg arg; arg.pmap = pmap; arg.va = va; smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *) &arg); } static void pmap_invalidate_page_action(void *arg) { pmap_t pmap = ((struct pmap_invalidate_page_arg *) arg)->pmap; vm_offset_t va = ((struct pmap_invalidate_page_arg *) arg)->va; #endif if (pmap->pm_active & PCPU_GET(cpumask)) { ALPHA_TBIS(va); alpha_pal_imb(); /* XXX overkill? */ } else { pmap_invalidate_asn(pmap); } } static void pmap_invalidate_all(pmap_t pmap) { #ifdef SMP smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *) pmap); } static void pmap_invalidate_all_action(void *arg) { pmap_t pmap = (pmap_t) arg; #endif if (pmap->pm_active & PCPU_GET(cpumask)) { ALPHA_TBIA(); alpha_pal_imb(); /* XXX overkill? */ } else pmap_invalidate_asn(pmap); } static void pmap_get_asn(pmap_t pmap) { if (PCPU_GET(next_asn) > pmap_maxasn) { /* * Start a new ASN generation. * * Invalidate all per-process mappings and I-cache */ PCPU_SET(next_asn, 0); PCPU_SET(current_asngen, (PCPU_GET(current_asngen) + 1) & ASNGEN_MASK); if (PCPU_GET(current_asngen) == 0) { /* * Clear the pm_asn[].gen of all pmaps. * This is safe since it is only called from * pmap_activate after it has deactivated * the old pmap and it only affects this cpu. */ pmap_t tpmap; #ifdef PMAP_DIAGNOSTIC printf("pmap_get_asn: generation rollover\n"); #endif PCPU_SET(current_asngen, 1); mtx_lock_spin(&allpmaps_lock); LIST_FOREACH(tpmap, &allpmaps, pm_list) { tpmap->pm_asn[PCPU_GET(cpuid)].gen = 0; } mtx_unlock_spin(&allpmaps_lock); } /* * Since we are about to start re-using ASNs, we must * clear out the TLB and the I-cache since they are tagged * with the ASN. */ ALPHA_TBIAP(); alpha_pal_imb(); /* XXX overkill? */ } pmap->pm_asn[PCPU_GET(cpuid)].asn = PCPU_GET(next_asn); PCPU_SET(next_asn, PCPU_GET(next_asn) + 1); pmap->pm_asn[PCPU_GET(cpuid)].gen = PCPU_GET(current_asngen); } /*************************************************** * Low level helper routines..... 
***************************************************/ /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { pt_entry_t *pte; vm_paddr_t pa; pa = 0; PMAP_LOCK(pmap); pte = pmap_lev3pte(pmap, va); if (pte != NULL && pmap_pte_v(pte)) pa = pmap_pte_pa(pte); PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pt_entry_t *pte; vm_page_t m; m = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); pte = pmap_lev3pte(pmap, va); if (pte != NULL && pmap_pte_v(pte) && (*pte & pte_prot(pmap, prot)) == pte_prot(pmap, prot)) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); vm_page_hold(m); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); return (m); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; pt_entry_t *pte; for (i = 0; i < count; i++) { vm_offset_t tva = va + i * PAGE_SIZE; pt_entry_t npte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m[i])) | PG_ASM | PG_KRE | PG_KWE | PG_V; pt_entry_t opte; pte = vtopte(tva); opte = *pte; *pte = npte; if (opte) pmap_invalidate_page(kernel_pmap, tva); } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(va, count) vm_offset_t va; int count; { int i; register pt_entry_t *pte; for (i = 0; i < count; i++) { pte = vtopte(va); *pte = 0; pmap_invalidate_page(kernel_pmap, va); va += PAGE_SIZE; } } /* * add a wired page to the kva * note that in order for the mapping to take effect -- you * should do a invltlb after doing the pmap_kenter... */ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_offset_t pa) { pt_entry_t *pte; pt_entry_t npte, opte; npte = pmap_phys_to_pte(pa) | PG_ASM | PG_KRE | PG_KWE | PG_V; pte = vtopte(va); opte = *pte; *pte = npte; if (opte) pmap_invalidate_page(kernel_pmap, va); } /* * remove a page from the kernel pagetables */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { register pt_entry_t *pte; pte = vtopte(va); *pte = 0; pmap_invalidate_page(kernel_pmap, va); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { return ALPHA_PHYS_TO_K0SEG(start); } /*************************************************** * Page table page management routines..... ***************************************************/ /* * This routine unholds page table pages, and if the hold count * drops to zero, then it decrements the wire count. 
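 * (The vm_page wire_count of a page table page doubles as its hold count
 * here: pmap_allocpte() bumps it for every mapping entered through the
 * page, and only when the last such mapping goes away is the page table
 * page itself unmapped and freed by _pmap_unwire_pte_hold().)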
*/ static PMAP_INLINE int pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) { --m->wire_count; if (m->wire_count == 0) return _pmap_unwire_pte_hold(pmap, va, m); else return 0; } static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) { vm_offset_t pteva; pt_entry_t* pte; /* * unmap the page table page */ if (m->pindex >= NUSERLEV3MAPS) { /* Level 2 page table */ pte = pmap_lev1pte(pmap, va); pteva = (vm_offset_t) PTlev2 + alpha_ptob(m->pindex - NUSERLEV3MAPS); } else { /* Level 3 page table */ pte = pmap_lev2pte(pmap, va); pteva = (vm_offset_t) PTmap + alpha_ptob(m->pindex); } *pte = 0; if (m->pindex < NUSERLEV3MAPS) { /* unhold the level 2 page table */ vm_page_t lev2pg; lev2pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pmap_lev1pte(pmap, va))); pmap_unwire_pte_hold(pmap, va, lev2pg); } --pmap->pm_stats.resident_count; /* * Do a invltlb to make the invalidated mapping * take effect immediately. */ pmap_invalidate_page(pmap, pteva); if (pmap->pm_ptphint == m) pmap->pm_ptphint = NULL; vm_page_free_zero(m); atomic_subtract_int(&cnt.v_wire_count, 1); return 1; } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte) { unsigned ptepindex; if (va >= VM_MAXUSER_ADDRESS) return 0; if (mpte == NULL) { ptepindex = (va >> ALPHA_L2SHIFT); if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { mpte = pmap->pm_ptphint; } else { mpte = PHYS_TO_VM_PAGE(pmap_pte_pa(pmap_lev2pte(pmap, va))); pmap->pm_ptphint = mpte; } } return pmap_unwire_pte_hold(pmap, va, mpte); } void pmap_pinit0(pmap) struct pmap *pmap; { int i; PMAP_LOCK_INIT(pmap); pmap->pm_lev1 = Lev1map; pmap->pm_ptphint = NULL; pmap->pm_active = 0; for (i = 0; i < MAXCPU; i++) { pmap->pm_asn[i].asn = 0; pmap->pm_asn[i].gen = 0; } TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN | MTX_QUIET); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ void pmap_pinit(pmap) register struct pmap *pmap; { vm_page_t lev1pg; int i; PMAP_LOCK_INIT(pmap); /* * allocate the page directory page */ while ((lev1pg = vm_page_alloc(NULL, NUSERLEV3MAPS + NUSERLEV2MAPS, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) VM_WAIT; pmap->pm_lev1 = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(lev1pg)); if ((lev1pg->flags & PG_ZERO) == 0) bzero(pmap->pm_lev1, PAGE_SIZE); /* install self-referential address mapping entry (not PG_ASM) */ pmap->pm_lev1[PTLEV1I] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg)) | PG_V | PG_KRE | PG_KWE; pmap->pm_ptphint = NULL; pmap->pm_active = 0; for (i = 0; i < MAXCPU; i++) { pmap->pm_asn[i].asn = 0; pmap->pm_asn[i].gen = 0; } TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); mtx_lock_spin(&allpmaps_lock); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE); } /* * this routine is called if the page table page is not * mapped correctly. 
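 * The page index encodes which table is wanted, mirroring the pm_pdeobj
 * layout described at the top of the file: indices below NUSERLEV3MAPS
 * name user level 3 pages, and NUSERLEV3MAPS + l1index names the level 2
 * page covering that slot, which is why the recursive call below allocates
 * a missing level 2 page before the level 3 page can be entered.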
*/ static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) { pt_entry_t* pte; vm_offset_t ptepa; vm_page_t m; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); /* * Find or fabricate a new pagetable page */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (flags & M_WAITOK) { PMAP_UNLOCK(pmap); vm_page_unlock_queues(); VM_WAIT; vm_page_lock_queues(); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. */ pmap->pm_stats.resident_count++; ptepa = VM_PAGE_TO_PHYS(m); if (ptepindex >= NUSERLEV3MAPS) { pte = &pmap->pm_lev1[ptepindex - NUSERLEV3MAPS]; } else { int l1index = ptepindex >> ALPHA_PTSHIFT; pt_entry_t* l1pte = &pmap->pm_lev1[l1index]; pt_entry_t* l2map; if (!pmap_pte_v(l1pte)) { if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index, flags) == NULL) { --m->wire_count; vm_page_free(m); return (NULL); } } else { vm_page_t l2page; l2page = PHYS_TO_VM_PAGE(pmap_pte_pa(l1pte)); l2page->wire_count++; } l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); pte = &l2map[ptepindex & ((1 << ALPHA_PTSHIFT) - 1)]; } *pte = pmap_phys_to_pte(ptepa) | PG_KRE | PG_KWE | PG_V; /* * Set the page table hint */ pmap->pm_ptphint = m; return m; } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va) { unsigned ptepindex; pt_entry_t* lev2pte; vm_page_t m; /* * Calculate pagetable page index */ ptepindex = va >> (PAGE_SHIFT + ALPHA_PTSHIFT); retry: /* * Get the level2 entry */ lev2pte = pmap_lev2pte(pmap, va); /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (lev2pte && pmap_pte_v(lev2pte)) { /* * In order to get the page table page, try the * hint first. */ if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { m = pmap->pm_ptphint; } else { m = PHYS_TO_VM_PAGE(pmap_pte_pa(lev2pte)); pmap->pm_ptphint = m; } m->wire_count++; } else { /* * Here if the pte page isn't mapped, or if it has been * deallocated. */ m = _pmap_allocpte(pmap, ptepindex, M_WAITOK); if (m == NULL) goto retry; } return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { vm_page_t lev1pg; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); lev1pg = PHYS_TO_VM_PAGE(pmap_pte_pa(&pmap->pm_lev1[PTLEV1I])); KASSERT(lev1pg->pindex == NUSERLEV3MAPS + NUSERLEV2MAPS, ("pmap_release: PTLEV1I page has unexpected pindex %ld", lev1pg->pindex)); mtx_lock_spin(&allpmaps_lock); LIST_REMOVE(pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); /* * Level1 pages need to have the kernel * stuff cleared, so they can go into the zero queue also. 
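 * By this point all user mappings (and with them the user level 2 and
 * level 3 page table pages) are gone, so only the K1SEG entries copied in
 * by pmap_pinit() and the PTLEV1I self-map remain; clearing them below
 * leaves the page all zeroes, which is why vm_page_free_zero() is safe.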
*/ bzero(pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE); pmap->pm_lev1[PTLEV1I] = 0; PMAP_LOCK_DESTROY(pmap); vm_page_lock_queues(); lev1pg->wire_count--; atomic_subtract_int(&cnt.v_wire_count, 1); vm_page_free_zero(lev1pg); vm_page_unlock_queues(); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { /* XXX come back to this */ struct pmap *pmap; pt_entry_t* pte; pt_entry_t newlev1, newlev2; vm_offset_t pa; vm_page_t nkpg; critical_enter(); if (kernel_vm_end == 0) { kernel_vm_end = VM_MIN_KERNEL_ADDRESS;; /* Count the level 2 page tables */ nklev2 = 0; nklev3 = 0; while (pmap_pte_v(pmap_lev1pte(kernel_pmap, kernel_vm_end))) { nklev2++; nklev3 += (1L << ALPHA_PTSHIFT); kernel_vm_end += ALPHA_L1SIZE; } /* Count the level 3 page tables in the last level 2 page table */ kernel_vm_end -= ALPHA_L1SIZE; nklev3 -= (1 << ALPHA_PTSHIFT); while (pmap_pte_v(pmap_lev2pte(kernel_pmap, kernel_vm_end))) { nklev3++; kernel_vm_end += ALPHA_L2SIZE; } } addr = (addr + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); while (kernel_vm_end < addr) { /* * If the level 1 pte is invalid, allocate a new level 2 page table */ pte = pmap_lev1pte(kernel_pmap, kernel_vm_end); if (!pmap_pte_v(pte)) { int pindex = NKLEV3MAPS + pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I; nkpg = vm_page_alloc(NULL, pindex, VM_ALLOC_NOOBJ | VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); printf("pmap_growkernel: growing to %lx\n", addr); printf("pmap_growkernel: adding new level2 page table\n"); nklev2++; pmap_zero_page(nkpg); pa = VM_PAGE_TO_PHYS(nkpg); newlev1 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; mtx_lock_spin(&allpmaps_lock); LIST_FOREACH(pmap, &allpmaps, pm_list) { *pmap_lev1pte(pmap, kernel_vm_end) = newlev1; } mtx_unlock_spin(&allpmaps_lock); *pte = newlev1; pmap_invalidate_all(kernel_pmap); } /* * If the level 2 pte is invalid, allocate a new level 3 page table */ pte = pmap_lev2pte(kernel_pmap, kernel_vm_end); if (pmap_pte_v(pte)) { kernel_vm_end = (kernel_vm_end + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); continue; } /* * This index is bogus, but out of the way */ nkpg = vm_page_alloc(NULL, nklev3, VM_ALLOC_NOOBJ | VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); nklev3++; pmap_zero_page(nkpg); pa = VM_PAGE_TO_PHYS(nkpg); newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; *pte = newlev2; kernel_vm_end = (kernel_vm_end + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1); } critical_exit(); } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. */ static pv_entry_t get_pv_entry(pmap_t locked_pmap) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; struct vpgqueues *vpq; pmap_t pmap; pt_entry_t *pte, tpte; pv_entry_t allocated_pv, next_pv, pv; vm_offset_t va; vm_page_t m; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); allocated_pv = uma_zalloc(pvzone, M_NOWAIT); if (allocated_pv != NULL) { pv_entry_count++; if (pv_entry_count > pv_entry_high_water) pagedaemon_wakeup(); else return (allocated_pv); } /* * Reclaim pv entries: At first, destroy mappings to inactive * pages. 
After that, if a pv entry is still needed, destroy * mappings to active pages. */ if (ratecheck(&lastprint, &printinterval)) printf("Approaching the limit on PV entries, " "increase the vm.pmap.shpgperproc tunable.\n"); vpq = &vm_page_queues[PQ_INACTIVE]; retry: TAILQ_FOREACH(m, &vpq->pl, pageq) { if (m->hold_count || m->busy || (m->flags & PG_BUSY)) continue; TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { va = pv->pv_va; pmap = pv->pv_pmap; /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) PMAP_LOCK(pmap); else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) continue; pmap->pm_stats.resident_count--; pte = pmap_lev3pte(pmap, va); tpte = *pte; *pte = 0; KASSERT((tpte & PG_W) == 0, ("get_pv_entry: wired pte %#lx", tpte)); if ((tpte & PG_FOR) == 0) vm_page_flag_set(m, PG_REFERENCED); if ((tpte & PG_FOW) == 0) vm_page_dirty(m); pmap_invalidate_page(pmap, va); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); m->md.pv_list_count--; pmap_unuse_pt(pmap, va, pv->pv_ptem); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); if (allocated_pv == NULL) allocated_pv = pv; else free_pv_entry(pv); } } if (allocated_pv == NULL) { if (vpq == &vm_page_queues[PQ_INACTIVE]) { vpq = &vm_page_queues[PQ_ACTIVE]; goto retry; } panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable"); } return (allocated_pv); } static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { pv_entry_t pv; int rtval; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } rtval = 0; if (pv) { rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_FIRST(&m->md.pv_list) == NULL) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } return rtval; } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(pmap); pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Conditionally create a pv entry. 
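 * Unlike pmap_insert_entry(), this version never sleeps and never triggers
 * pv entry reclamation; it simply fails once the pv entry high water mark
 * is reached, letting callers such as pmap_enter_quick_locked() back out
 * of a speculative mapping instead.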
*/ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { pv_entry_count++; pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; return (TRUE); } else return (FALSE); } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va) { pt_entry_t oldpte; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpte = *ptq; *ptq = 0; if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(&oldpte)); if ((oldpte & PG_FOW) == 0) vm_page_dirty(m); if ((oldpte & PG_FOR) == 0) vm_page_flag_set(m, PG_REFERENCED); return pmap_remove_entry(pmap, m, va); } else { return pmap_unuse_pt(pmap, va, NULL); } } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va) { register pt_entry_t *ptq; PMAP_LOCK_ASSERT(pmap, MA_OWNED); ptq = pmap_lev3pte(pmap, va); /* * if there is no pte for this address, just skip it!!! */ if (!ptq || !pmap_pte_v(ptq)) return; /* * get a local va for mappings for this pmap. */ (void) pmap_remove_pte(pmap, ptq, va); pmap_invalidate_page(pmap, va); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va, nva; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; vm_page_lock_queues(); PMAP_LOCK(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. */ if (sva + PAGE_SIZE == eva) { pmap_remove_page(pmap, sva); goto out; } for (va = sva; va < eva; va = nva) { if (!pmap_pte_v(pmap_lev1pte(pmap, va))) { nva = alpha_l1trunc(va + ALPHA_L1SIZE); continue; } if (!pmap_pte_v(pmap_lev2pte(pmap, va))) { nva = alpha_l2trunc(va + ALPHA_L2SIZE); continue; } pmap_remove_page(pmap, va); nva = va + PAGE_SIZE; } out: vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { register pv_entry_t pv; pt_entry_t *pte, tpte; #if defined(PMAP_DIAGNOSTIC) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ if (m->flags & PG_FICTITIOUS) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m)); } #endif while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); pv->pv_pmap->pm_stats.resident_count--; if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m)) panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); tpte = *pte; *pte = 0; if (tpte & PG_W) pv->pv_pmap->pm_stats.wired_count--; /* * Update the vm_page_t clean and reference bits. 
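 * Alpha has no hardware referenced or modified bits; they are emulated
 * with the fault-on bits. A mapping whose PG_FOW bit has been cleared has
 * already taken a write fault (so the page is dirty), and one whose PG_FOR
 * bit has been cleared has been referenced, hence the checks below.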
*/ if ((tpte & PG_FOW) == 0) vm_page_dirty(m); if ((tpte & PG_FOR) == 0) vm_page_flag_set(m, PG_REFERENCED); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); PMAP_UNLOCK(pv->pv_pmap); free_pv_entry(pv); } vm_page_flag_clear(m, PG_WRITEABLE); } /* * Set the physical protection on the * specified range of this map as requested. */ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pt_entry_t* pte; int newprot; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if (prot & VM_PROT_WRITE) return; newprot = pte_prot(pmap, prot); if ((sva & PAGE_MASK) || (eva & PAGE_MASK)) panic("pmap_protect: unaligned addresses"); vm_page_lock_queues(); PMAP_LOCK(pmap); while (sva < eva) { /* * If level 1 pte is invalid, skip this segment */ pte = pmap_lev1pte(pmap, sva); if (!pmap_pte_v(pte)) { sva = alpha_l1trunc(sva) + ALPHA_L1SIZE; continue; } /* * If level 2 pte is invalid, skip this segment */ pte = pmap_lev2pte(pmap, sva); if (!pmap_pte_v(pte)) { sva = alpha_l2trunc(sva) + ALPHA_L2SIZE; continue; } /* * If level 3 pte is invalid, skip this page */ pte = pmap_lev3pte(pmap, sva); if (!pmap_pte_v(pte)) { sva += PAGE_SIZE; continue; } if (pmap_pte_prot(pte) != newprot) { pt_entry_t oldpte = *pte; vm_page_t m = NULL; if ((oldpte & PG_FOR) == 0) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); vm_page_flag_set(m, PG_REFERENCED); oldpte |= (PG_FOR | PG_FOE); } if ((oldpte & PG_FOW) == 0) { if (m == NULL) m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); vm_page_dirty(m); oldpte |= PG_FOW; } oldpte = (oldpte & ~PG_PROT) | newprot; *pte = oldpte; pmap_invalidate_page(pmap, sva); } sva += PAGE_SIZE; } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_offset_t pa; pt_entry_t *pte; vm_offset_t opa; pt_entry_t origpte, newpte; vm_page_t mpte; int managed; va &= ~PAGE_MASK; #ifdef PMAP_DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); #endif mpte = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { mpte = pmap_allocpte(pmap, va); } pte = pmap_lev3pte(pmap, va); /* * Page Directory table entry not valid, we need a new PT page */ if (pte == NULL) { panic("pmap_enter: invalid kernel page tables pmap=%p, va=0x%lx\n", pmap, va); } origpte = *pte; pa = VM_PAGE_TO_PHYS(m); managed = 0; opa = pmap_pte_pa(pte); /* * Mapping has not changed, must be protection or wiring change. */ if (origpte && (opa == pa)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. 
*/ if (wired && ((origpte & PG_W) == 0)) pmap->pm_stats.wired_count++; else if (!wired && (origpte & PG_W)) pmap->pm_stats.wired_count--; /* * Remove extra pte reference */ if (mpte) mpte->wire_count--; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { if ((origpte & PG_FOW) != PG_FOW) vm_page_dirty(m); } managed = origpte & PG_MANAGED; goto validate; } /* * Mapping has changed, invalidate old range and fall through to * handle validating new mapping. */ if (opa) { int err; err = pmap_remove_pte(pmap, pte, va); if (err) panic("pmap_enter: pte vanished, va: 0x%lx", va); } /* * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_insert_entry(pmap, va, mpte, m); managed |= PG_MANAGED; } /* * Increment counters */ pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. */ newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed; if (managed) { /* * Set up referenced/modified emulation for the new * mapping. Any old referenced/modified emulation * results for the old mapping will have been recorded * either in pmap_remove_pte() or above in the code * which handles protection and/or wiring changes. */ newpte |= (PG_FOR | PG_FOW | PG_FOE); } if (wired) newpte |= PG_W; /* * if the mapping or permission bits are different, we need * to update the pte. */ if (origpte != newpte) { *pte = newpte; if (origpte) pmap_invalidate_page(pmap, va); if (prot & VM_PROT_EXECUTE) alpha_pal_imb(); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); psize = atop(end - start); mpte = NULL; m = m_start; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot, mpte); m = TAILQ_NEXT(m, listq); } PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... 
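 * It is only used for speculative mappings (pmap_enter_object() and
 * fault-ahead prefaulting), so it may fail silently, never grants write
 * access, and for managed pages leaves the fault-on bits set so that the
 * first real access still goes through pmap_emulate_reference().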
*/ -vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { PMAP_LOCK(pmap); - mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte); + (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL); PMAP_UNLOCK(pmap); - return (mpte); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte) { register pt_entry_t *pte; int managed; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { unsigned ptepindex; pt_entry_t* l2pte; /* * Calculate lev2 page index */ ptepindex = va >> ALPHA_L2SHIFT; if (mpte && (mpte->pindex == ptepindex)) { mpte->wire_count++; } else { /* * Get the level 2 entry */ l2pte = pmap_lev2pte(pmap, va); /* * If the level 2 page table is mapped, we just increment * the hold count, and activate it. */ if (l2pte && pmap_pte_v(l2pte)) { if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == ptepindex)) { mpte = pmap->pm_ptphint; } else { mpte = PHYS_TO_VM_PAGE(pmap_pte_pa(l2pte)); pmap->pm_ptphint = mpte; } mpte->wire_count++; } else { mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT); if (mpte == NULL) return (mpte); } } } else { mpte = NULL; } /* * This call to vtopte makes the assumption that we are * entering the page into the current pmap. In order to support * quick entry into any pmap, one would likely use pmap_pte_quick. * But that isn't as quick as vtopte. */ pte = vtopte(va); if (*pte) { if (mpte != NULL) { pmap_unwire_pte_hold(pmap, va, mpte); mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. */ managed = 0; if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) { if (!pmap_try_insert_pv_entry(pmap, va, mpte, m)) { if (mpte != NULL) { pmap_unwire_pte_hold(pmap, va, mpte); mpte = NULL; } return (mpte); } managed = PG_MANAGED | PG_FOR | PG_FOW | PG_FOE; } /* * Increment counters */ pmap->pm_stats.resident_count++; /* * Validate the mapping with limited access, read and/or execute but * not write. */ *pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | pte_prot(pmap, prot & (VM_PROT_READ | VM_PROT_EXECUTE)) | managed; return mpte; } /* * Make temporary mapping for a physical address. This is called * during dump. */ void * pmap_kenter_temporary(vm_offset_t pa, int i) { return (void *) ALPHA_PHYS_TO_K0SEG(pa - (i * PAGE_SIZE)); } /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. 
*/ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { pt_entry_t *pte; PMAP_LOCK(pmap); pte = pmap_lev3pte(pmap, va); if (wired && !pmap_pte_w(pte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_pte_w(pte)) pmap->pm_stats.wired_count--; /* * Wiring is not a hardware characteristic so there is no need to * invalidate TLB. */ pmap_pte_set_w(pte, wired); PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. This is for the vm_pagezero idle process. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(mdst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap, m) pmap_t pmap; vm_page_t m; { pv_entry_t pv; int loops = 0; if (m->flags & PG_FICTITIOUS) return FALSE; /* * Not found, check current mappings returning immediately if found. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { return TRUE; } loops++; if (loops >= 16) break; } return (FALSE); } #define PMAP_REMOVE_PAGES_CURPROC_ONLY /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. 
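 * It walks the pmap's own pv list instead of the page tables, skips wired
 * mappings, and defers TLB invalidation to one pmap_invalidate_all() call
 * at the end, which is what makes it so much cheaper than pmap_remove()
 * when tearing down an entire address space.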
*/ void pmap_remove_pages(pmap, sva, eva) pmap_t pmap; vm_offset_t sva, eva; { pt_entry_t *pte, tpte; vm_page_t m; pv_entry_t pv, npv; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } #endif vm_page_lock_queues(); PMAP_LOCK(pmap); for(pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { if (pv->pv_va >= eva || pv->pv_va < sva) { npv = TAILQ_NEXT(pv, pv_plist); continue; } #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY pte = vtopte(pv->pv_va); #else pte = pmap_pte_quick(pmap, pv->pv_va); #endif if (!pmap_pte_v(pte)) panic("pmap_remove_pages: page on pm_pvlist has no pte\n"); tpte = *pte; /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { npv = TAILQ_NEXT(pv, pv_plist); continue; } *pte = 0; m = PHYS_TO_VM_PAGE(pmap_pte_pa(&tpte)); pmap->pm_stats.resident_count--; if ((tpte & PG_FOW) == 0) vm_page_dirty(m); npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); m->md.pv_list_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); } pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); } /* * this routine is used to modify bits in ptes */ static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem) { pv_entry_t pv; pt_entry_t *pte; int changed; if ((m->flags & PG_FICTITIOUS) || (!setem && bit == (PG_UWE|PG_KWE) && (m->flags & PG_WRITEABLE) == 0)) return; changed = 0; /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); changed = 0; if (setem) { *pte |= bit; changed = 1; } else { pt_entry_t pbits = *pte; if (pbits & bit) { changed = 1; *pte = pbits & ~bit; } } if (changed) pmap_invalidate_page(pv->pv_pmap, pv->pv_va); PMAP_UNLOCK(pv->pv_pmap); } if (!setem && bit == (PG_UWE|PG_KWE)) vm_page_flag_clear(m, PG_WRITEABLE); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { pmap_changebit(m, PG_KWE|PG_UWE, FALSE); } else { pmap_remove_all(m); } } } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; int count; if (m->flags & PG_FICTITIOUS) return 0; /* * Loop over current mappings looking for any which have don't * have PG_FOR set (i.e. ones where we have taken an emulate * reference trap recently). */ count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & PG_FOR)) { count++; *pte |= PG_FOR | PG_FOE; pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } PMAP_UNLOCK(pv->pv_pmap); } return count; } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. 
*/ boolean_t pmap_is_modified(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; boolean_t rv; rv = FALSE; if (m->flags & PG_FICTITIOUS) return (rv); /* * A page is modified if any mapping has had its PG_FOW flag * cleared. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); rv = !(*pte & PG_FOW); PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pt_entry_t *pte; boolean_t rv; rv = FALSE; PMAP_LOCK(pmap); if (pmap_pte_v(pmap_lev1pte(pmap, addr)) && pmap_pte_v(pmap_lev2pte(pmap, addr))) { pte = vtopte(addr); rv = *pte == 0; } PMAP_UNLOCK(pmap); return (rv); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; if (m->flags & PG_FICTITIOUS) return; /* * Loop over current mappings setting PG_FOW where needed. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & PG_FOW)) { *pte |= PG_FOW; pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } PMAP_UNLOCK(pv->pv_pmap); } } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; if (m->flags & PG_FICTITIOUS) return; /* * Loop over current mappings setting PG_FOR and PG_FOE where needed. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); if (!(*pte & (PG_FOR | PG_FOE))) { *pte |= (PG_FOR | PG_FOE); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } PMAP_UNLOCK(pv->pv_pmap); } } /* * pmap_emulate_reference: * * Emulate reference and/or modified bit hits. * From NetBSD */ void pmap_emulate_reference(struct vmspace *vm, vm_offset_t v, int user, int write) { pmap_t pmap; pt_entry_t *pte; /* * Convert process and virtual address to physical address. */ if (v >= VM_MIN_KERNEL_ADDRESS) { if (user) panic("pmap_emulate_reference: user ref to kernel"); pmap = kernel_pmap; PMAP_LOCK(pmap); pte = vtopte(v); } else { KASSERT(vm != NULL, ("pmap_emulate_reference: bad vmspace")); pmap = &vm->vm_pmap; PMAP_LOCK(pmap); pte = pmap_lev3pte(pmap, v); } /* * Another CPU can modify the pmap between the emulation trap and this * CPU locking the pmap. As a result, the pte may be inconsistent * with the access that caused the emulation trap. In such cases, * invalidate this CPU's TLB entry and return. */ if (!pmap_pte_v(pte)) goto tbis; /* * Twiddle the appropriate bits to reflect the reference * and/or modification.. * * The rules: * (1) always mark page as used, and * (2) if it was a write fault, mark page as modified. */ if (write) { if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE))) goto tbis; if (!(*pte & PG_FOW)) goto tbis; *pte &= ~(PG_FOR | PG_FOE | PG_FOW); } else { if (!(*pte & (user ? 
PG_URE : PG_URE | PG_KRE))) goto tbis; if (!(*pte & (PG_FOR | PG_FOE))) goto tbis; *pte &= ~(PG_FOR | PG_FOE); } tbis: ALPHA_TBIS(v); PMAP_UNLOCK(pmap); } /* * Miscellaneous support routines follow */ static void alpha_protection_init() { int prot, *kp, *up; kp = protection_codes[0]; up = protection_codes[1]; for (prot = 0; prot < 8; prot++) { switch (prot) { case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: *kp++ = PG_ASM; *up++ = 0; break; case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: *kp++ = PG_ASM | PG_KRE; *up++ = PG_URE | PG_KRE; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: *kp++ = PG_ASM | PG_KWE; *up++ = PG_UWE | PG_KWE; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: *kp++ = PG_ASM | PG_KWE | PG_KRE; *up++ = PG_UWE | PG_URE | PG_KWE | PG_KRE; break; } } } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(pa, size) vm_offset_t pa; vm_size_t size; { return (void*) ALPHA_PHYS_TO_K0SEG(pa); } void pmap_unmapdev(va, size) vm_offset_t va; vm_size_t size; { } /* * perform the pmap work for mincore */ int pmap_mincore(pmap, addr) pmap_t pmap; vm_offset_t addr; { pt_entry_t *ptep, pte; int val = 0; PMAP_LOCK(pmap); ptep = pmap_lev3pte(pmap, addr); pte = (ptep != NULL) ? *ptep : 0; PMAP_UNLOCK(pmap); if (pte & PG_V) { vm_page_t m; vm_offset_t pa; val = MINCORE_INCORE; if ((pte & PG_MANAGED) == 0) return val; pa = alpha_ptob(ALPHA_PTE_TO_PFN(pte)); m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if ((pte & PG_FOW) == 0) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; else { /* * Modified by someone */ vm_page_lock_queues(); if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; vm_page_unlock_queues(); } /* * Referenced by us */ if ((pte & (PG_FOR | PG_FOE)) == 0) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; else { /* * Referenced by someone */ vm_page_lock_queues(); if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } vm_page_unlock_queues(); } } return val; } void pmap_activate(struct thread *td) { pmap_t pmap; pmap = vmspace_pmap(td->td_proc->p_vmspace); critical_enter(); if (pmap_active[PCPU_GET(cpuid)] && pmap != pmap_active[PCPU_GET(cpuid)]) { atomic_clear_32(&pmap_active[PCPU_GET(cpuid)]->pm_active, PCPU_GET(cpumask)); pmap_active[PCPU_GET(cpuid)] = 0; } td->td_pcb->pcb_hw.apcb_ptbr = ALPHA_K0SEG_TO_PHYS((vm_offset_t) pmap->pm_lev1) >> PAGE_SHIFT; if (pmap->pm_asn[PCPU_GET(cpuid)].gen != PCPU_GET(current_asngen)) pmap_get_asn(pmap); pmap_active[PCPU_GET(cpuid)] = pmap; atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask)); td->td_pcb->pcb_hw.apcb_asn = pmap->pm_asn[PCPU_GET(cpuid)].asn; critical_exit(); if (td == curthread) { alpha_pal_swpctx((u_long)td->td_md.md_pcbpaddr); } } void pmap_deactivate(struct thread *td) { pmap_t pmap; pmap = vmspace_pmap(td->td_proc->p_vmspace); atomic_clear_32(&pmap->pm_active, PCPU_GET(cpumask)); pmap_active[PCPU_GET(cpuid)] = 0; } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { return addr; } #if 0 #if defined(PMAP_DEBUG) pmap_pid_dump(int pid) { pmap_t pmap; struct proc *p; int npte = 0; int index; sx_slock(&allproc_lock); 
LIST_FOREACH(p, &allproc, p_list) { if (p->p_pid != pid) continue; if (p->p_vmspace) { int i,j; index = 0; pmap = vmspace_pmap(p->p_vmspace); for (i = 0; i < NPDEPG; i++) { pd_entry_t *pde; pt_entry_t *pte; vm_offset_t base = i << PDRSHIFT; pde = &pmap->pm_pdir[i]; if (pde && pmap_pde_v(pde)) { for (j = 0; j < NPTEPG; j++) { vm_offset_t va = base + (j << PAGE_SHIFT); if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { if (index) { index = 0; printf("\n"); } sx_sunlock(&allproc_lock); return npte; } pte = pmap_pte_quick(pmap, va); if (pte && pmap_pte_v(pte)) { vm_offset_t pa; vm_page_t m; pa = *(int *)pte; m = PHYS_TO_VM_PAGE(pa); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; if (index >= 2) { index = 0; printf("\n"); } else { printf(" "); } } } } } } } sx_sunlock(&allproc_lock); return npte; } #endif #if defined(DEBUG) static void pads(pmap_t pm); void pmap_pvdump(vm_offset_t pa); /* print address space of pmap*/ static void pads(pm) pmap_t pm; { int i, j; vm_offset_t va; pt_entry_t *ptep; if (pm == kernel_pmap) return; for (i = 0; i < NPDEPG; i++) if (pm->pm_pdir[i]) for (j = 0; j < NPTEPG; j++) { va = (i << PDRSHIFT) + (j << PAGE_SHIFT); if (pm == kernel_pmap && va < KERNBASE) continue; if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) continue; ptep = pmap_pte_quick(pm, va); if (pmap_pte_v(ptep)) printf("%x:%x ", va, *(int *) ptep); }; } void pmap_pvdump(pa) vm_offset_t pa; { pv_entry_t pv; vm_page_t m; printf("pa %x", pa); m = PHYS_TO_VM_PAGE(pa); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); pads(pv->pv_pmap); } printf(" "); } #endif #endif Index: stable/6/sys/amd64/amd64/pmap.c =================================================================== --- stable/6/sys/amd64/amd64/pmap.c (revision 173366) +++ stable/6/sys/amd64/amd64/pmap.c (revision 173367) @@ -1,3255 +1,3253 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * Copyright (c) 2005 Alan L. Cox * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * In addition to hardware address maps, this * module is called upon to provide software-use-only * maps which may or may not be stored in the same * form as hardware maps. These pseudo-maps are * used to store intermediate results from copy * operations to and from address spaces. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
*/ #include "opt_msgbuf.h" #include "opt_pmap.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if defined(DIAGNOSTIC) #define PMAP_DIAGNOSTIC #endif #if !defined(PMAP_DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif struct pmap kernel_pmap_store; vm_paddr_t avail_start; /* PA of first available physical page */ vm_paddr_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static int nkpt; static int ndmpdp; static vm_paddr_t dmaplimit; vm_offset_t kernel_vm_end; pt_entry_t pg_nx; static u_int64_t KPTphys; /* phys addr of kernel level 1 */ static u_int64_t KPDphys; /* phys addr of kernel level 2 */ u_int64_t KPDPphys; /* phys addr of kernel level 3 */ u_int64_t KPML4phys; /* phys addr of kernel level 4 */ static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static struct vm_object pvzone_obj; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; int pmap_pagedaemon_waken; /* * All those kernel PT submaps that BSD is so fond of */ pt_entry_t *CMAP1 = 0; caddr_t CADDR1 = 0; struct msgbuf *msgbufp = 0; /* * Crashdump maps. */ static caddr_t crashdumpmap; static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(void); static void pmap_clear_ptes(vm_page_t m, long bit); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte); static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free); static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde); static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags); static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t* free); static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *); static vm_offset_t pmap_kmem_choose(vm_offset_t addr); CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); /* * Move the kernel virtual free pointer to the next * 2MB. 
This is used to help improve performance * by using a large (2MB) page for much of the kernel * (.text, .data, .bss) */ static vm_offset_t pmap_kmem_choose(vm_offset_t addr) { vm_offset_t newaddr = addr; newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); return newaddr; } /********************/ /* Inline functions */ /********************/ /* Return a non-clipped PD index for a given VA */ static __inline vm_pindex_t pmap_pde_pindex(vm_offset_t va) { return va >> PDRSHIFT; } /* Return various clipped indexes for a given VA */ static __inline vm_pindex_t pmap_pte_index(vm_offset_t va) { return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); } static __inline vm_pindex_t pmap_pde_index(vm_offset_t va) { return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); } static __inline vm_pindex_t pmap_pdpe_index(vm_offset_t va) { return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); } static __inline vm_pindex_t pmap_pml4e_index(vm_offset_t va) { return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); } /* Return a pointer to the PML4 slot that corresponds to a VA */ static __inline pml4_entry_t * pmap_pml4e(pmap_t pmap, vm_offset_t va) { if (!pmap) return NULL; return (&pmap->pm_pml4[pmap_pml4e_index(va)]); } /* Return a pointer to the PDP slot that corresponds to a VA */ static __inline pdp_entry_t * pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va) { pdp_entry_t *pdpe; pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME); return (&pdpe[pmap_pdpe_index(va)]); } /* Return a pointer to the PDP slot that corresponds to a VA */ static __inline pdp_entry_t * pmap_pdpe(pmap_t pmap, vm_offset_t va) { pml4_entry_t *pml4e; pml4e = pmap_pml4e(pmap, va); if (pml4e == NULL || (*pml4e & PG_V) == 0) return NULL; return (pmap_pml4e_to_pdpe(pml4e, va)); } /* Return a pointer to the PD slot that corresponds to a VA */ static __inline pd_entry_t * pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va) { pd_entry_t *pde; pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME); return (&pde[pmap_pde_index(va)]); } /* Return a pointer to the PD slot that corresponds to a VA */ static __inline pd_entry_t * pmap_pde(pmap_t pmap, vm_offset_t va) { pdp_entry_t *pdpe; pdpe = pmap_pdpe(pmap, va); if (pdpe == NULL || (*pdpe & PG_V) == 0) return NULL; return (pmap_pdpe_to_pde(pdpe, va)); } /* Return a pointer to the PT slot that corresponds to a VA */ static __inline pt_entry_t * pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) { pt_entry_t *pte; pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); return (&pte[pmap_pte_index(va)]); } /* Return a pointer to the PT slot that corresponds to a VA */ static __inline pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va) { pd_entry_t *pde; pde = pmap_pde(pmap, va); if (pde == NULL || (*pde & PG_V) == 0) return NULL; if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */ return ((pt_entry_t *)pde); return (pmap_pde_to_pte(pde, va)); } static __inline pt_entry_t * pmap_pte_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *ptepde) { pd_entry_t *pde; pde = pmap_pde(pmap, va); if (pde == NULL || (*pde & PG_V) == 0) return NULL; *ptepde = *pde; if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */ return ((pt_entry_t *)pde); return (pmap_pde_to_pte(pde, va)); } PMAP_INLINE pt_entry_t * vtopte(vm_offset_t va) { u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); return (PTmap + ((va >> PAGE_SHIFT) & mask)); } static __inline pd_entry_t * vtopde(vm_offset_t va) { u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); return 
(PDmap + ((va >> PDRSHIFT) & mask)); } static u_int64_t allocpages(int n) { u_int64_t ret; ret = avail_start; bzero((void *)ret, n * PAGE_SIZE); avail_start += n * PAGE_SIZE; return (ret); } static void create_pagetables(void) { int i; /* Allocate pages */ KPTphys = allocpages(NKPT); KPML4phys = allocpages(1); KPDPphys = allocpages(NKPML4E); KPDphys = allocpages(NKPDPE); ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT; if (ndmpdp < 4) /* Minimum 4GB of dirmap */ ndmpdp = 4; DMPDPphys = allocpages(NDMPML4E); DMPDphys = allocpages(ndmpdp); dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; /* Fill in the underlying page table pages */ /* Read-only from zero to physfree */ /* XXX not fully used, underneath 2M pages */ for (i = 0; (i << PAGE_SHIFT) < avail_start; i++) { ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT; ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G; } /* Now map the page tables at their location within PTmap */ for (i = 0; i < NKPT; i++) { ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT); ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V; } /* Map from zero to end of allocations under 2M pages */ /* This replaces some of the KPTphys entries above */ for (i = 0; (i << PDRSHIFT) < avail_start; i++) { ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT; ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G; } /* And connect up the PD to the PDP */ for (i = 0; i < NKPDPE; i++) { ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT); ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U; } /* Now set up the direct map space using 2MB pages */ for (i = 0; i < NPDEPG * ndmpdp; i++) { ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT; ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G; } /* And the direct map space's PDP */ for (i = 0; i < ndmpdp; i++) { ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT); ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U; } /* And recursively map PML4 to itself in order to get PTmap */ ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys; ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U; /* Connect the Direct Map slot up to the PML4 */ ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys; ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U; /* Connect the KVA slot up to the PML4 */ ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys; ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U; } /* * Bootstrap the system enough to run with virtual memory. * * On amd64 this is called after mapping has already been enabled * and just syncs the pmap module with what has already been done. * [We can't call it easily with mapping off since the kernel is not * mapped with PA == VA, hence we would have to relocate every address * from the linked base (virtual) address "KERNBASE" to the actual * (physical) address starting relative to 0] */ void pmap_bootstrap(firstaddr) vm_paddr_t *firstaddr; { vm_offset_t va; pt_entry_t *pte, *unused; avail_start = *firstaddr; /* * Create an initial set of page tables to run the kernel in. */ create_pagetables(); *firstaddr = avail_start; virtual_avail = (vm_offset_t) KERNBASE + avail_start; virtual_avail = pmap_kmem_choose(virtual_avail); virtual_end = VM_MAX_KERNEL_ADDRESS; /* XXX do %cr0 as well */ load_cr4(rcr4() | CR4_PGE | CR4_PSE); load_cr3(KPML4phys); /* * Initialize the kernel pmap (which is statically allocated). 
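 *
 * The statically allocated kernel_pmap simply points at the KPML4phys page
 * built by create_pagetables() above and is marked active on all CPUs.
 * The SYSMAP() macro defined a few lines below carves matching
 * (virtual address, PTE pointer) pairs out of virtual_avail; for example
 *
 *	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 *
 * expands, roughly, to
 *
 *	CADDR1 = (caddr_t)va;  va += 1 * PAGE_SIZE;
 *	CMAP1 = pte;           pte += 1;
 *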
*/ PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys); kernel_pmap->pm_active = -1; /* don't allow deactivation */ TAILQ_INIT(&kernel_pmap->pm_pvlist); nkpt = NKPT; /* * Reserve some special page table entries/VA space for temporary * mapping of pages. */ #define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); va = virtual_avail; pte = vtopte(va); /* * CMAP1 is only used for the memory test. */ SYSMAP(caddr_t, CMAP1, CADDR1, 1) /* * Crashdump maps. */ SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) /* * msgbufp is used to map the system message buffer. */ SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE))) virtual_avail = va; *CMAP1 = 0; invltlb(); /* Initialize the PAT MSR. */ pmap_init_pat(); } /* * Setup the PAT MSR. */ void pmap_init_pat(void) { uint64_t pat_msr; /* Bail if this CPU doesn't implement PAT. */ if (!(cpu_feature & CPUID_PAT)) panic("no PAT??"); #ifdef PAT_WORKS /* * Leave the indices 0-3 at the default of WB, WT, UC, and UC-. * Program 4 and 5 as WP and WC. * Leave 6 and 7 as UC and UC-. */ pat_msr = rdmsr(MSR_PAT); pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | PAT_VALUE(5, PAT_WRITE_COMBINING); #else /* * Due to some Intel errata, we can only safely use the lower 4 * PAT entries. Thus, just replace PAT Index 2 with WC instead * of UC-. * * Intel Pentium III Processor Specification Update * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B * or Mode C Paging) * * Intel Pentium IV Processor Specification Update * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) */ pat_msr = rdmsr(MSR_PAT); pat_msr &= ~PAT_MASK(2); pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); #endif wrmsr(MSR_PAT, pat_msr); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); } void pmap_init2() { } /*************************************************** * Low level helper routines..... ***************************************************/ /* * Determine the appropriate bits to set in a PTE or PDE for a specified * caching mode. */ static int pmap_cache_bits(int mode, boolean_t is_pde) { int pat_flag, pat_index, cache_bits; /* The PAT bit is different for PTE's and PDE's. */ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; /* If we don't support PAT, map extended modes to older ones. */ if (!(cpu_feature & CPUID_PAT)) { switch (mode) { case PAT_UNCACHEABLE: case PAT_WRITE_THROUGH: case PAT_WRITE_BACK: break; case PAT_UNCACHED: case PAT_WRITE_COMBINING: case PAT_WRITE_PROTECTED: mode = PAT_UNCACHEABLE; break; } } /* Map the caching mode to a PAT index. 
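 *
 * The index chosen here is then folded into the PTE/PDE as three bits:
 * bit 2 selects the PAT bit (PG_PTE_PAT or PG_PDE_PAT), bit 1 PG_NC_PCD,
 * and bit 0 PG_NC_PWT.  For instance, with the errata workaround in
 * effect write-combining uses index 2, which encodes as
 *
 *	cache_bits = (2 & 0x4 ? pat_flag : 0) |
 *	             (2 & 0x2 ? PG_NC_PCD : 0) |
 *	             (2 & 0x1 ? PG_NC_PWT : 0)   == PG_NC_PCD
 *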
*/ switch (mode) { #ifdef PAT_WORKS case PAT_UNCACHEABLE: pat_index = 3; break; case PAT_WRITE_THROUGH: pat_index = 1; break; case PAT_WRITE_BACK: pat_index = 0; break; case PAT_UNCACHED: pat_index = 2; break; case PAT_WRITE_COMBINING: pat_index = 5; break; case PAT_WRITE_PROTECTED: pat_index = 4; break; #else case PAT_UNCACHED: case PAT_UNCACHEABLE: case PAT_WRITE_PROTECTED: pat_index = 3; break; case PAT_WRITE_THROUGH: pat_index = 1; break; case PAT_WRITE_BACK: pat_index = 0; break; case PAT_WRITE_COMBINING: pat_index = 2; break; #endif default: panic("Unknown caching mode %d\n", mode); } /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ cache_bits = 0; if (pat_index & 0x4) cache_bits |= pat_flag; if (pat_index & 0x2) cache_bits |= PG_NC_PCD; if (pat_index & 0x1) cache_bits |= PG_NC_PWT; return (cache_bits); } #ifdef SMP /* * For SMP, these functions have to use the IPI mechanism for coherence. */ void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { u_int cpumask; u_int other_cpus; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { invlpg(va); smp_invlpg(va); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) invlpg(va); if (pmap->pm_active & other_cpus) smp_masked_invlpg(pmap->pm_active & other_cpus, va); } sched_unpin(); } void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { u_int cpumask; u_int other_cpus; vm_offset_t addr; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); smp_invlpg_range(sva, eva); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); if (pmap->pm_active & other_cpus) smp_masked_invlpg_range(pmap->pm_active & other_cpus, sva, eva); } sched_unpin(); } void pmap_invalidate_all(pmap_t pmap) { u_int cpumask; u_int other_cpus; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { invltlb(); smp_invltlb(); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) invltlb(); if (pmap->pm_active & other_cpus) smp_masked_invltlb(pmap->pm_active & other_cpus); } sched_unpin(); } void pmap_invalidate_cache(void) { sched_pin(); wbinvd(); smp_cache_flush(); sched_unpin(); } #else /* !SMP */ /* * Normal, non-SMP, invalidation functions. * We inline these within pmap.c for speed. */ PMAP_INLINE void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { if (pmap == kernel_pmap || pmap->pm_active) invlpg(va); } PMAP_INLINE void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; if (pmap == kernel_pmap || pmap->pm_active) for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); } PMAP_INLINE void pmap_invalidate_all(pmap_t pmap) { if (pmap == kernel_pmap || pmap->pm_active) invltlb(); } PMAP_INLINE void pmap_invalidate_cache(void) { wbinvd(); } #endif /* !SMP */ /* * Are we current address space or kernel? */ static __inline int pmap_is_current(pmap_t pmap) { return (pmap == kernel_pmap || (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME)); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. 
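 *
 * A condensed sketch of the lookup performed below (locking and error
 * handling omitted); a valid PG_PS entry terminates the walk at the
 * 2MB level:
 *
 *	pd_entry_t *pde = pmap_pde(pmap, va);
 *
 *	if (*pde & PG_PS)
 *		pa = (*pde & PG_PS_FRAME) | (va & PDRMASK);
 *	else
 *		pa = (*pmap_pde_to_pte(pde, va) & PG_FRAME) |
 *		    (va & PAGE_MASK);
 *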
*/ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { vm_paddr_t rtval; pt_entry_t *pte; pd_entry_t pde, *pdep; rtval = 0; PMAP_LOCK(pmap); pdep = pmap_pde(pmap, va); if (pdep != NULL) { pde = *pdep; if (pde) { if ((pde & PG_PS) != 0) { rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); PMAP_UNLOCK(pmap); return rtval; } pte = pmap_pde_to_pte(pdep, va); rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); } } PMAP_UNLOCK(pmap); return (rtval); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pd_entry_t pde, *pdep; pt_entry_t pte; vm_page_t m; m = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); pdep = pmap_pde(pmap, va); if (pdep != NULL && (pde = *pdep)) { if (pde & PG_PS) { if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK)); vm_page_hold(m); } } else { pte = *pmap_pde_to_pte(pdep, va); if ((pte & PG_V) && ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { m = PHYS_TO_VM_PAGE(pte & PG_FRAME); vm_page_hold(m); } } } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); return (m); } vm_paddr_t pmap_kextract(vm_offset_t va) { pd_entry_t *pde; vm_paddr_t pa; if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { pa = DMAP_TO_PHYS(va); } else { pde = vtopde(va); if (*pde & PG_PS) { pa = (*pde & PG_PS_FRAME) | (va & PDRMASK); } else { pa = *vtopte(va); pa = (pa & PG_FRAME) | (va & PAGE_MASK); } } return pa; } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a wired page to the kva. * Note: not SMP coherent. */ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V | PG_G); } PMAP_INLINE void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0)); } /* * Remove a page from the kernel pagetables. * Note: not SMP coherent. */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; pte = vtopte(va); pte_clear(pte); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return PHYS_TO_DMAP(start); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. 
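 *
 * A typical use is a short-lived, multi-page kernel window over wired
 * pages, along these lines (the allocation call is only indicative of
 * how the VA range might be obtained):
 *
 *	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	pmap_qenter(va, ma, npages);
 *	...  access the wired pages in ma[] through va  ...
 *	pmap_qremove(va, npages);
 *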
*/ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pt_entry_t *endpte, oldpte, *pte; oldpte = 0; pte = vtopte(sva); endpte = pte + count; while (pte < endpte) { oldpte |= *pte; pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | PG_RW | PG_V); pte++; ma++; } if ((oldpte & PG_V) != 0) pmap_invalidate_range(kernel_pmap, sva, sva + count * PAGE_SIZE); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... ***************************************************/ static PMAP_INLINE void pmap_free_zero_pages(vm_page_t free) { vm_page_t m; while (free != NULL) { m = free; free = m->right; vm_page_free_zero(m); } } /* * This routine unholds page table pages, and if the hold count * drops to zero, then it decrements the wire count. */ static PMAP_INLINE int pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free) { --m->wire_count; if (m->wire_count == 0) return _pmap_unwire_pte_hold(pmap, va, m, free); else return 0; } static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free) { vm_offset_t pteva; /* * unmap the page table page */ if (m->pindex >= (NUPDE + NUPDPE)) { /* PDP page */ pml4_entry_t *pml4; pml4 = pmap_pml4e(pmap, va); pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE)); *pml4 = 0; } else if (m->pindex >= NUPDE) { /* PD page */ pdp_entry_t *pdp; pdp = pmap_pdpe(pmap, va); pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE); *pdp = 0; } else { /* PTE page */ pd_entry_t *pd; pd = pmap_pde(pmap, va); pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex); *pd = 0; } --pmap->pm_stats.resident_count; if (m->pindex < NUPDE) { /* We just released a PT, unhold the matching PD */ vm_page_t pdpg; pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME); pmap_unwire_pte_hold(pmap, va, pdpg, free); } if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) { /* We just released a PD, unhold the matching PDP */ vm_page_t pdppg; pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME); pmap_unwire_pte_hold(pmap, va, pdppg, free); } /* * Do an invltlb to make the invalidated mapping * take effect immediately. */ pmap_invalidate_page(pmap, pteva); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ m->right = *free; *free = m; atomic_subtract_int(&cnt.v_wire_count, 1); return 1; } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free) { vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) return 0; KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); return pmap_unwire_pte_hold(pmap, va, mpte, free); } void pmap_pinit0(pmap) struct pmap *pmap; { PMAP_LOCK_INIT(pmap); pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + KPML4phys); pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. 
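 *
 * Every user pmap shares the kernel's upper-level structures and carries
 * its own recursive slot, so after pmap_pinit() the new PML4 page holds
 * (flag bits omitted):
 *
 *	pm_pml4[KPML4I]    = KPDPphys        -- kernel virtual address space
 *	pm_pml4[DMPML4I]   = DMPDPphys       -- direct map of physical memory
 *	pm_pml4[PML4PML4I] = the PML4 page itself  -- recursive PTmap slot
 *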
*/ void pmap_pinit(pmap) register struct pmap *pmap; { vm_page_t pml4pg; static vm_pindex_t color; PMAP_LOCK_INIT(pmap); /* * allocate the page directory page */ while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) VM_WAIT; pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg)); if ((pml4pg->flags & PG_ZERO) == 0) pagezero(pmap->pm_pml4); /* Wire in kernel global address entries. */ pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U; pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U; /* install self-referential address mapping entry(s) */ pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M; pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } /* * this routine is called if the page table page is not * mapped correctly. * * Note: If a page allocation fails at page table level two or three, * one or two pages may be held during the wait, only to be released * afterwards. This conservative approach is easily argued to avoid * race conditions. */ static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags) { vm_page_t m, pdppg, pdpg; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (flags & M_WAITOK) { PMAP_UNLOCK(pmap); vm_page_unlock_queues(); VM_WAIT; vm_page_lock_queues(); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. */ pmap->pm_stats.resident_count++; if (ptepindex >= (NUPDE + NUPDPE)) { pml4_entry_t *pml4; vm_pindex_t pml4index; /* Wire up a new PDPE page */ pml4index = ptepindex - (NUPDE + NUPDPE); pml4 = &pmap->pm_pml4[pml4index]; *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } else if (ptepindex >= NUPDE) { vm_pindex_t pml4index; vm_pindex_t pdpindex; pml4_entry_t *pml4; pdp_entry_t *pdp; /* Wire up a new PDE page */ pdpindex = ptepindex - NUPDE; pml4index = pdpindex >> NPML4EPGSHIFT; pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pdp, recurse */ if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, flags) == NULL) { --m->wire_count; vm_page_free(m); return (NULL); } } else { /* Add reference to pdp page */ pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME); pdppg->wire_count++; } pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); /* Now find the pdp page */ pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } else { vm_pindex_t pml4index; vm_pindex_t pdpindex; pml4_entry_t *pml4; pdp_entry_t *pdp; pd_entry_t *pd; /* Wire up a new PTE page */ pdpindex = ptepindex >> NPDPEPGSHIFT; pml4index = pdpindex >> NPML4EPGSHIFT; /* First, find the pdp and check that its valid. 
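 * The pindex arithmetic used throughout this function assumes a single
 * flat namespace for page table pages:
 *
 *	[0, NUPDE)                page table (lowest level) pages
 *	[NUPDE, NUPDE + NUPDPE)   page directory pages
 *	[NUPDE + NUPDPE, ...)     page directory pointer pages
 *
 * so, for example, the page directory page covering va lives at index
 * NUPDE + (pmap_pde_pindex(va) >> NPDPEPGSHIFT), as pmap_allocpde()
 * computes below.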
*/ pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, flags) == NULL) { --m->wire_count; vm_page_free(m); return (NULL); } pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; } else { pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; if ((*pdp & PG_V) == 0) { /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, flags) == NULL) { --m->wire_count; vm_page_free(m); return (NULL); } } else { /* Add reference to the pd page */ pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME); pdpg->wire_count++; } } pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME); /* Now we know where the page directory page is */ pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)]; *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } return m; } static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags) { vm_pindex_t pdpindex, ptepindex; pdp_entry_t *pdpe; vm_page_t pdpg; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("pmap_allocpde: flags is neither M_NOWAIT nor M_WAITOK")); retry: pdpe = pmap_pdpe(pmap, va); if (pdpe != NULL && (*pdpe & PG_V) != 0) { /* Add a reference to the pd page. */ pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME); pdpg->wire_count++; } else { /* Allocate a pd page. */ ptepindex = pmap_pde_pindex(va); pdpindex = ptepindex >> NPDPEPGSHIFT; pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags); if (pdpg == NULL && (flags & M_WAITOK)) goto retry; } return (pdpg); } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) { vm_pindex_t ptepindex; pd_entry_t *pd; vm_page_t m, free; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); /* * Calculate pagetable page index */ ptepindex = pmap_pde_pindex(va); retry: /* * Get the page directory entry */ pd = pmap_pde(pmap, va); /* * This supports switching from a 2MB page to a * normal 4K page. */ if (pd != 0 && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) { *pd = 0; pd = 0; pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; free = NULL; pmap_unuse_pt(pmap, va, *pmap_pdpe(pmap, va), &free); pmap_invalidate_all(kernel_pmap); pmap_free_zero_pages(free); } /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (pd != 0 && (*pd & PG_V) != 0) { m = PHYS_TO_VM_PAGE(*pd & PG_FRAME); m->wire_count++; } else { /* * Here if the pte page isn't mapped, or if it has been * deallocated. */ m = _pmap_allocpte(pmap, ptepindex, flags); if (m == NULL && (flags & M_WAITOK)) goto retry; } return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. 
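 *
 * Release undoes exactly the wiring done by pmap_pinit(): the three
 * PML4 slots installed there are cleared and the PML4 page itself is
 * unwired and freed, in outline:
 *
 *	pmap->pm_pml4[KPML4I] = 0;
 *	pmap->pm_pml4[DMPML4I] = 0;
 *	pmap->pm_pml4[PML4PML4I] = 0;
 *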
*/ void pmap_release(pmap_t pmap) { vm_page_t m; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME); pmap->pm_pml4[KPML4I] = 0; /* KVA */ pmap->pm_pml4[DMPML4I] = 0; /* Direct Map */ pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ vm_page_lock_queues(); m->wire_count--; atomic_subtract_int(&cnt.v_wire_count, 1); vm_page_free_zero(m); vm_page_unlock_queues(); PMAP_LOCK_DESTROY(pmap); } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "LU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "LU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; pd_entry_t *pde, newpdir; pdp_entry_t newpdp; mtx_assert(&kernel_map->system_mtx, MA_OWNED); if (kernel_vm_end == 0) { kernel_vm_end = KERNBASE; nkpt = 0; while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) { kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); nkpt++; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } addr = roundup2(addr, PAGE_SIZE * NPTEPG); if (addr - 1 >= kernel_map->max_offset) addr = kernel_map->max_offset; while (kernel_vm_end < addr) { pde = pmap_pde(kernel_pmap, kernel_vm_end); if (pde == NULL) { /* We need a new PDP entry */ nkpg = vm_page_alloc(NULL, nkpt, VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); newpdp = (pdp_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M); *pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp; continue; /* try again */ } if ((*pde & PG_V) != 0) { kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } continue; } /* * This index is bogus, but out of the way */ nkpg = vm_page_alloc(NULL, nkpt, VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); nkpt++; pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M); *pmap_pde(kernel_pmap, kernel_vm_end) = newpdir; kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. 
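 *
 * Allocation is a plain uma_zalloc(pvzone, M_NOWAIT); once pv_entry_count
 * crosses pv_entry_high_water the pagedaemon is woken.  Callers must
 * therefore be prepared for a NULL return, roughly:
 *
 *	if ((pv = get_pv_entry()) == NULL)
 *		panic("no pv entries ...");	-- as pmap_insert_entry() does
 *	-- or report failure instead, as pmap_try_insert_pv_entry() does
 *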
*/ static pv_entry_t get_pv_entry(void) { pv_entry_count++; if ((pv_entry_count > pv_entry_high_water) && (pmap_pagedaemon_waken == 0)) { pmap_pagedaemon_waken = 1; wakeup (&vm_pages_needed); } return uma_zalloc(pvzone, M_NOWAIT); } static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } KASSERT(pv != NULL, ("pmap_remove_entry: pv not found")); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(); if (pv == NULL) panic("no pv entries: increase vm.pmap.shpgperproc"); pv->pv_va = va; pv->pv_pmap = pmap; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Conditionally create a pv entry. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { pv_entry_count++; pv->pv_va = va; pv->pv_pmap = pmap; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; return (TRUE); } else return (FALSE); } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free) { pt_entry_t oldpte; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpte = pte_load_clear(ptq); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; /* * Machines that don't support invlpg, also don't support * PG_G. */ if (oldpte & PG_G) pmap_invalidate_page(kernel_pmap, va); pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); if (oldpte & PG_M) { KASSERT((oldpte & PG_RW), ("pmap_remove_pte: modified page not writable: va: %#lx, pte: %#lx", va, oldpte)); vm_page_dirty(m); } if (oldpte & PG_A) vm_page_flag_set(m, PG_REFERENCED); pmap_remove_entry(pmap, m, va); } return (pmap_unuse_pt(pmap, va, ptepde, free)); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde) { pt_entry_t *pte; vm_page_t free = NULL; PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((*pde & PG_V) == 0) return; pte = pmap_pde_to_pte(pde, va); if ((*pte & PG_V) == 0) return; pmap_remove_pte(pmap, pte, va, *pde, &free); pmap_invalidate_page(pmap, va); pmap_free_zero_pages(free); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. 
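 *
 * The loop below advances va_next one paging structure at a time, so
 * large unmapped regions are skipped without visiting every 4KB page:
 *
 *	va_next = (sva + NBPML4) & ~PML4MASK;   -- PML4E invalid: skip 512GB
 *	va_next = (sva + NBPDP) & ~PDPMASK;     -- PDPE invalid: skip 1GB
 *	va_next = (sva + NBPDR) & ~PDRMASK;     -- otherwise: one 2MB PDE
 *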
*/ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va_next; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t ptpaddr, *pde; pt_entry_t *pte; vm_page_t free = NULL; int anyvalid; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; anyvalid = 0; vm_page_lock_queues(); PMAP_LOCK(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. */ if (sva + PAGE_SIZE == eva) { pde = pmap_pde(pmap, sva); if (pde && (*pde & PG_PS) == 0) { pmap_remove_page(pmap, sva, pde); goto out; } } for (; sva < eva; sva = va_next) { if (pmap->pm_stats.resident_count == 0) break; pml4e = pmap_pml4e(pmap, sva); if ((*pml4e & PG_V) == 0) { va_next = (sva + NBPML4) & ~PML4MASK; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, sva); if ((*pdpe & PG_V) == 0) { va_next = (sva + NBPDP) & ~PDPMASK; continue; } /* * Calculate index for next page table. */ va_next = (sva + NBPDR) & ~PDRMASK; pde = pmap_pdpe_to_pde(pdpe, sva); ptpaddr = *pde; /* * Weed out invalid mappings. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { *pde = 0; pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; pmap_unuse_pt(pmap, sva, *pdpe, &free); anyvalid = 1; continue; } /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. */ if (va_next > eva) va_next = eva; for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, sva += PAGE_SIZE) { if (*pte == 0) continue; /* * The TLB entry for a PG_G mapping is invalidated * by pmap_remove_pte(). */ if ((*pte & PG_G) == 0) anyvalid = 1; if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free)) break; } } out: if (anyvalid) { pmap_invalidate_all(pmap); pmap_free_zero_pages(free); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { register pv_entry_t pv; pt_entry_t *pte, tpte; pd_entry_t ptepde; vm_page_t free; #if defined(PMAP_DIAGNOSTIC) /* * XXX This makes pmap_remove_all() illegal for non-managed pages! */ if (m->flags & PG_FICTITIOUS) { panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m)); } #endif mtx_assert(&vm_page_queue_mtx, MA_OWNED); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { PMAP_LOCK(pv->pv_pmap); pv->pv_pmap->pm_stats.resident_count--; pte = pmap_pte_pde(pv->pv_pmap, pv->pv_va, &ptepde); tpte = pte_load_clear(pte); if (tpte & PG_W) pv->pv_pmap->pm_stats.wired_count--; if (tpte & PG_A) vm_page_flag_set(m, PG_REFERENCED); /* * Update the vm_page_t clean and reference bits. */ if (tpte & PG_M) { KASSERT((tpte & PG_RW), ("pmap_remove_all: modified page not writable: va: %#lx, pte: %#lx", pv->pv_va, tpte)); vm_page_dirty(m); } free = NULL; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, ptepde, &free); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); pmap_free_zero_pages(free); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; PMAP_UNLOCK(pv->pv_pmap); free_pv_entry(pv); } vm_page_flag_clear(m, PG_WRITEABLE); } /* * Set the physical protection on the * specified range of this map as requested. 
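 *
 * For 4KB mappings the PTE is rewritten with a compare-and-swap loop so
 * that accessed/modified bits set concurrently by other CPUs are not
 * lost; condensed from the code below:
 *
 *	do {
 *		obits = pbits = *pte;
 *		if ((prot & VM_PROT_WRITE) == 0)
 *			pbits &= ~(PG_RW | PG_M);
 *		if ((prot & VM_PROT_EXECUTE) == 0)
 *			pbits |= pg_nx;
 *	} while (pbits != obits && !atomic_cmpset_long(pte, obits, pbits));
 *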
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va_next; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t ptpaddr, *pde; pt_entry_t *pte; int anychanged; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == (VM_PROT_WRITE|VM_PROT_EXECUTE)) return; anychanged = 0; vm_page_lock_queues(); PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { pml4e = pmap_pml4e(pmap, sva); if ((*pml4e & PG_V) == 0) { va_next = (sva + NBPML4) & ~PML4MASK; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, sva); if ((*pdpe & PG_V) == 0) { va_next = (sva + NBPDP) & ~PDPMASK; continue; } va_next = (sva + NBPDR) & ~PDRMASK; pde = pmap_pdpe_to_pde(pdpe, sva); ptpaddr = *pde; /* * Weed out invalid mappings. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { if ((prot & VM_PROT_WRITE) == 0) *pde &= ~(PG_M|PG_RW); if ((prot & VM_PROT_EXECUTE) == 0) *pde |= pg_nx; anychanged = 1; continue; } if (va_next > eva) va_next = eva; for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, sva += PAGE_SIZE) { pt_entry_t obits, pbits; vm_page_t m; retry: obits = pbits = *pte; if ((pbits & PG_V) == 0) continue; if (pbits & PG_MANAGED) { m = NULL; if (pbits & PG_A) { m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); vm_page_flag_set(m, PG_REFERENCED); pbits &= ~PG_A; } if ((pbits & PG_M) != 0) { if (m == NULL) m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); vm_page_dirty(m); } } if ((prot & VM_PROT_WRITE) == 0) pbits &= ~(PG_RW | PG_M); if ((prot & VM_PROT_EXECUTE) == 0) pbits |= pg_nx; if (pbits != obits) { if (!atomic_cmpset_long(pte, obits, pbits)) goto retry; if (obits & PG_G) pmap_invalidate_page(pmap, sva); else anychanged = 1; } } } if (anychanged) pmap_invalidate_all(pmap); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_paddr_t pa; pd_entry_t *pde; register pt_entry_t *pte; vm_paddr_t opa; pt_entry_t origpte, newpte; vm_page_t mpte, om; boolean_t invlva; va = trunc_page(va); #ifdef PMAP_DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va); #endif mpte = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); /* * In the case that a page table page is not * resident, we are creating it here. 
*/ if (va < VM_MAXUSER_ADDRESS) { mpte = pmap_allocpte(pmap, va, M_WAITOK); } #if 0 && defined(PMAP_DIAGNOSTIC) else { pd_entry_t *pdeaddr = pmap_pde(pmap, va); origpte = *pdeaddr; if ((origpte & PG_V) == 0) { panic("pmap_enter: invalid kernel page table page, pde=%p, va=%p\n", origpte, va); } } #endif pde = pmap_pde(pmap, va); if (pde != NULL) { if ((*pde & PG_PS) != 0) panic("pmap_enter: attempted pmap_enter on 2MB page"); pte = pmap_pde_to_pte(pde, va); } else pte = NULL; /* * Page Directory table entry not valid, we need a new PT page */ if (pte == NULL) panic("pmap_enter: invalid page directory va=%#lx\n", va); pa = VM_PAGE_TO_PHYS(m); om = NULL; origpte = *pte; opa = origpte & PG_FRAME; /* * Mapping has not changed, must be protection or wiring change. */ if (origpte && (opa == pa)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if (wired && ((origpte & PG_W) == 0)) pmap->pm_stats.wired_count++; else if (!wired && (origpte & PG_W)) pmap->pm_stats.wired_count--; /* * Remove extra pte reference */ if (mpte) mpte->wire_count--; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { om = m; pa |= PG_MANAGED; } goto validate; } /* * Mapping has changed, invalidate old range and fall through to * handle validating new mapping. */ if (opa) { if (origpte & PG_W) pmap->pm_stats.wired_count--; if (origpte & PG_MANAGED) { om = PHYS_TO_VM_PAGE(opa); pmap_remove_entry(pmap, om, va); } if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%lx", va)); } } else pmap->pm_stats.resident_count++; /* * Enter on the PV list if part of our managed memory. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_insert_entry(pmap, va, m); pa |= PG_MANAGED; } /* * Increment counters */ if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. */ newpte = (pt_entry_t)(pa | PG_V); if ((prot & VM_PROT_WRITE) != 0) newpte |= PG_RW; if ((prot & VM_PROT_EXECUTE) == 0) newpte |= pg_nx; if (wired) newpte |= PG_W; if (va < VM_MAXUSER_ADDRESS) newpte |= PG_U; if (pmap == kernel_pmap) newpte |= PG_G; /* * if the mapping or permission bits are different, we need * to update the pte. */ if ((origpte & ~(PG_M|PG_A)) != newpte) { if (origpte & PG_V) { invlva = FALSE; origpte = pte_load_store(pte, newpte | PG_A); if (origpte & PG_A) { if (origpte & PG_MANAGED) vm_page_flag_set(om, PG_REFERENCED); if (opa != VM_PAGE_TO_PHYS(m) || ((origpte & PG_NX) == 0 && (newpte & PG_NX))) invlva = TRUE; } if (origpte & PG_M) { KASSERT((origpte & PG_RW), ("pmap_enter: modified page not writable: va: %#lx, pte: %#lx", va, origpte)); if ((origpte & PG_MANAGED) != 0) vm_page_dirty(om); if ((newpte & PG_RW) == 0) invlva = TRUE; } if (invlva) pmap_invalidate_page(pmap, va); } else pte_store(pte, newpte | PG_A); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. 
Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); psize = atop(end - start); mpte = NULL; m = m_start; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot, mpte); m = TAILQ_NEXT(m, listq); } PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ -vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { PMAP_LOCK(pmap); - mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte); + (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL); PMAP_UNLOCK(pmap); - return (mpte); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte) { vm_page_t free; pt_entry_t *pte; vm_paddr_t pa; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { vm_pindex_t ptepindex; pd_entry_t *ptepa; /* * Calculate pagetable page index */ ptepindex = pmap_pde_pindex(va); if (mpte && (mpte->pindex == ptepindex)) { mpte->wire_count++; } else { /* * Get the page directory entry */ ptepa = pmap_pde(pmap, va); /* * If the page table page is mapped, we just increment * the hold count, and activate it. */ if (ptepa && (*ptepa & PG_V) != 0) { if (*ptepa & PG_PS) panic("pmap_enter_quick: unexpected mapping into 2MB page"); mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME); mpte->wire_count++; } else { mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT); if (mpte == NULL) return (mpte); } } } else { mpte = NULL; } /* * This call to vtopte makes the assumption that we are * entering the page into the current pmap. In order to support * quick entry into any pmap, one would likely use pmap_pte. * But that isn't as quick as vtopte. */ pte = vtopte(va); if (*pte) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. 
*/ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && !pmap_try_insert_pv_entry(pmap, va, m)) { if (mpte != NULL) { free = NULL; if (pmap_unwire_pte_hold(pmap, va, mpte, &free)) { pmap_invalidate_page(pmap, va); pmap_free_zero_pages(free); } mpte = NULL; } return (mpte); } /* * Increment counters */ pmap->pm_stats.resident_count++; pa = VM_PAGE_TO_PHYS(m); if ((prot & VM_PROT_EXECUTE) == 0) pa |= pg_nx; /* * Now validate mapping with RO protection */ if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) pte_store(pte, pa | PG_V | PG_U); else pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); return mpte; } /* * Make a temporary mapping for a physical address. This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_paddr_t pa, int i) { vm_offset_t va; va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); pmap_kenter(va, pa); invlpg(va); return ((void *)crashdumpmap); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { vm_offset_t va; vm_page_t p, pdpg; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); if (((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) { vm_page_t m[1]; pd_entry_t ptepa, *pde; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr); if (pde != 0 && (*pde & PG_V) != 0) goto out; PMAP_UNLOCK(pmap); retry: p = vm_page_lookup(object, pindex); if (p != NULL) { vm_page_lock_queues(); if (vm_page_sleep_if_busy(p, FALSE, "init4p")) goto retry; } else { p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL); if (p == NULL) return; m[0] = p; if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) { vm_page_lock_queues(); vm_page_free(p); vm_page_unlock_queues(); return; } p = vm_page_lookup(object, pindex); vm_page_lock_queues(); vm_page_wakeup(p); } vm_page_unlock_queues(); ptepa = VM_PAGE_TO_PHYS(p); if (ptepa & (NBPDR - 1)) return; p->valid = VM_PAGE_BITS_ALL; PMAP_LOCK(pmap); for (va = addr; va < addr + size; va += NBPDR) { while ((pdpg = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) { PMAP_UNLOCK(pmap); vm_page_lock_queues(); vm_page_busy(p); vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); VM_WAIT; VM_OBJECT_LOCK(object); vm_page_lock_queues(); vm_page_wakeup(p); vm_page_unlock_queues(); PMAP_LOCK(pmap); } pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); pde = &pde[pmap_pde_index(va)]; if ((*pde & PG_V) == 0) { pde_store(pde, ptepa | PG_PS | PG_M | PG_A | PG_U | PG_RW | PG_V); pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; } else { pdpg->wire_count--; KASSERT(pdpg->wire_count > 0, ("pmap_object_init_pt: missing reference " "to page directory page, va: 0x%lx", va)); } ptepa += NBPDR; } pmap_invalidate_all(pmap); out: PMAP_UNLOCK(pmap); } } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { register pt_entry_t *pte; /* * Wiring is not a hardware characteristic so there is no need to * invalidate TLB. 
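 * Only the software-maintained PG_W bit and pm_stats.wired_count change
 * here; the translation itself (frame and protection) is untouched, so
 * any existing TLB entry remains valid.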
*/ PMAP_LOCK(pmap); pte = pmap_pte(pmap, va); if (wired && (*pte & PG_W) == 0) { pmap->pm_stats.wired_count++; atomic_set_long(pte, PG_W); } else if (!wired && (*pte & PG_W) != 0) { pmap->pm_stats.wired_count--; atomic_clear_long(pte, PG_W); } PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { vm_page_t free; vm_offset_t addr; vm_offset_t end_addr = src_addr + len; vm_offset_t va_next; if (dst_addr != src_addr) return; if (!pmap_is_current(src_pmap)) return; vm_page_lock_queues(); if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } for (addr = src_addr; addr < end_addr; addr = va_next) { pt_entry_t *src_pte, *dst_pte; vm_page_t dstmpde, dstmpte, srcmpte; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t srcptepaddr, *pde; if (addr >= UPT_MIN_ADDRESS) panic("pmap_copy: invalid to pmap_copy page tables"); pml4e = pmap_pml4e(src_pmap, addr); if ((*pml4e & PG_V) == 0) { va_next = (addr + NBPML4) & ~PML4MASK; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, addr); if ((*pdpe & PG_V) == 0) { va_next = (addr + NBPDP) & ~PDPMASK; continue; } va_next = (addr + NBPDR) & ~PDRMASK; pde = pmap_pdpe_to_pde(pdpe, addr); srcptepaddr = *pde; if (srcptepaddr == 0) continue; if (srcptepaddr & PG_PS) { dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT); if (dstmpde == NULL) break; pde = (pd_entry_t *) PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde)); pde = &pde[pmap_pde_index(addr)]; if (*pde == 0) { *pde = srcptepaddr & ~PG_W; dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; } else dstmpde->wire_count--; continue; } srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); if (srcmpte->wire_count == 0) panic("pmap_copy: source page table page is unused"); if (va_next > end_addr) va_next = end_addr; src_pte = vtopte(addr); while (addr < va_next) { pt_entry_t ptetemp; ptetemp = *src_pte; /* * we only virtual copy managed pages */ if ((ptetemp & PG_MANAGED) != 0) { dstmpte = pmap_allocpte(dst_pmap, addr, M_NOWAIT); if (dstmpte == NULL) break; dst_pte = (pt_entry_t *) PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); dst_pte = &dst_pte[pmap_pte_index(addr)]; if (*dst_pte == 0 && pmap_try_insert_pv_entry(dst_pmap, addr, PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { /* * Clear the wired, modified, and * accessed (referenced) bits * during the copy. */ *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A); dst_pmap->pm_stats.resident_count++; } else { free = NULL; if (pmap_unwire_pte_hold(dst_pmap, addr, dstmpte, &free)) { pmap_invalidate_page(dst_pmap, addr); pmap_free_zero_pages(free); } } if (dstmpte->wire_count >= srcmpte->wire_count) break; } addr += PAGE_SIZE; src_pte++; } } vm_page_unlock_queues(); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); } /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. 
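 * A caller-side sanity check could look like the following sketch (it is
 * not part of this file):
 *
 *	KASSERT(off >= 0 && off + size <= PAGE_SIZE,
 *	    ("pmap_zero_page_area: range crosses a page boundary"));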
*/ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); if (off == 0 && size == PAGE_SIZE) pagezero((void *)va); else bzero((char *)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. This * is intended to be called from the vm_pagezero process only and * outside of Giant. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); pagecopy((void *)src, (void *)dst); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap, m) pmap_t pmap; vm_page_t m; { pv_entry_t pv; int loops = 0; if (m->flags & PG_FICTITIOUS) return FALSE; mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { return TRUE; } loops++; if (loops >= 16) break; } return (FALSE); } #define PMAP_REMOVE_PAGES_CURPROC_ONLY /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. */ void pmap_remove_pages(pmap, sva, eva) pmap_t pmap; vm_offset_t sva, eva; { pt_entry_t *pte, tpte; vm_page_t m, free = NULL; pv_entry_t pv, npv; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } #endif vm_page_lock_queues(); PMAP_LOCK(pmap); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { if (pv->pv_va >= eva || pv->pv_va < sva) { npv = TAILQ_NEXT(pv, pv_plist); continue; } #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY pte = vtopte(pv->pv_va); #else pte = pmap_pte(pmap, pv->pv_va); #endif tpte = *pte; if (tpte == 0) { printf("TPTE at %p IS ZERO @ VA %08lx\n", pte, pv->pv_va); panic("bad pte"); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { npv = TAILQ_NEXT(pv, pv_plist); continue; } m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); KASSERT(m->phys_addr == (tpte & PG_FRAME), ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT(m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte)); pmap->pm_stats.resident_count--; pte_clear(pte); /* * Update the vm_page_t clean and reference bits. 
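 * The pte has already been cleared above; tpte holds its old value.  A
 * set PG_M bit there means the page was dirtied through this mapping, so
 * the machine-independent layer is told via vm_page_dirty() before the
 * pv entry is freed.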
*/ if (tpte & PG_M) { vm_page_dirty(m); } npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); m->md.pv_list_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); pmap_unuse_pt(pmap, pv->pv_va, *vtopde(pv->pv_va), &free); free_pv_entry(pv); } pmap_invalidate_all(pmap); pmap_free_zero_pages(free); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; boolean_t rv; rv = FALSE; if (m->flags & PG_FICTITIOUS) return (rv); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_pte(pv->pv_pmap, pv->pv_va); rv = (*pte & PG_M) != 0; PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte; boolean_t rv; rv = FALSE; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr); if (pde != NULL && (*pde & PG_V)) { pte = vtopte(addr); rv = (*pte & PG_V) == 0; } PMAP_UNLOCK(pmap); return (rv); } /* * Clear the given bit in each of the given page's ptes. */ static __inline void pmap_clear_ptes(vm_page_t m, long bit) { register pv_entry_t pv; pt_entry_t pbits, *pte; if ((m->flags & PG_FICTITIOUS) || (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0)) return; mtx_assert(&vm_page_queue_mtx, MA_OWNED); /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_pte(pv->pv_pmap, pv->pv_va); retry: pbits = *pte; if (pbits & bit) { if (bit == PG_RW) { if (!atomic_cmpset_long(pte, pbits, pbits & ~(PG_RW | PG_M))) goto retry; if (pbits & PG_M) { vm_page_dirty(m); } } else { atomic_clear_long(pte, bit); } pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } PMAP_UNLOCK(pv->pv_pmap); } if (bit == PG_RW) vm_page_flag_clear(m, PG_WRITEABLE); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { pmap_clear_ptes(m, PG_RW); } else { pmap_remove_all(m); } } } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. 
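 * This implementation stops early once a handful of reference bits have
 * been cleared (see the rtval > 4 test below); that is enough for page
 * aging without walking arbitrarily long pv lists.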
*/ int pmap_ts_referenced(vm_page_t m) { register pv_entry_t pv, pvf, pvn; pt_entry_t *pte; pt_entry_t v; int rtval = 0; if (m->flags & PG_FICTITIOUS) return (rtval); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pvf = pv; do { pvn = TAILQ_NEXT(pv, pv_list); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); PMAP_LOCK(pv->pv_pmap); pte = pmap_pte(pv->pv_pmap, pv->pv_va); if (pte && ((v = pte_load(pte)) & PG_A) != 0) { atomic_clear_long(pte, PG_A); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); rtval++; if (rtval > 4) { PMAP_UNLOCK(pv->pv_pmap); break; } } PMAP_UNLOCK(pv->pv_pmap); } while ((pv = pvn) != NULL && pv != pvf); } return (rtval); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { pmap_clear_ptes(m, PG_M); } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { pmap_clear_ptes(m, PG_A); } /* * Miscellaneous support routines follow */ /* Adjust the cache mode for a 4KB page mapped via a PTE. */ static __inline void pmap_pte_attr(vm_offset_t va, int mode) { pt_entry_t *pte; u_int opte, npte; pte = vtopte(va); /* * The cache mode bits are all in the low 32-bits of the * PTE, so we can just spin on updating the low 32-bits. */ do { opte = *(u_int *)pte; npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT); npte |= pmap_cache_bits(mode, 0); } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); } /* Adjust the cache mode for a 2MB page mapped via a PDE. */ static __inline void pmap_pde_attr(vm_offset_t va, int mode) { pd_entry_t *pde; u_int opde, npde; pde = pmap_pde(kernel_pmap, va); /* * The cache mode bits are all in the low 32-bits of the * PDE, so we can just spin on updating the low 32-bits. */ do { opde = *(u_int *)pde; npde = opde & ~(PG_PDE_PAT | PG_NC_PCD | PG_NC_PWT); npde |= pmap_cache_bits(mode, 1); } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) { vm_offset_t va, tmpva, offset; /* * If this fits within the direct map window and use WB caching * mode, use the direct map. 
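 * A typical use might look like this sketch (the physical address below
 * is a made-up example, not taken from this file):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev_attr(0xfed00000UL, PAGE_SIZE, PAT_UNCACHEABLE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);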
*/ if (pa < dmaplimit && (pa + size) < dmaplimit && mode == PAT_WRITE_BACK) return ((void *)PHYS_TO_DMAP(pa)); offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); va = kmem_alloc_nofault(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); pa = trunc_page(pa); for (tmpva = va; size > 0; ) { pmap_kenter_attr(tmpva, pa, mode); size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, va, tmpva); pmap_invalidate_cache(); return ((void *)(va + offset)); } void * pmap_mapdev(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); } void pmap_unmapdev(va, size) vm_offset_t va; vm_size_t size; { vm_offset_t base, offset, tmpva; /* If we gave a direct map region in pmap_mapdev, do nothing */ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) return; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) pmap_kremove(tmpva); pmap_invalidate_range(kernel_pmap, va, tmpva); kmem_free(kernel_map, base, size); } int pmap_change_attr(va, size, mode) vm_offset_t va; vm_size_t size; int mode; { vm_offset_t base, offset, tmpva; pd_entry_t *pde; pt_entry_t *pte; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); /* Only supported on kernel virtual addresses. */ if (base <= VM_MAXUSER_ADDRESS) return (EINVAL); /* * XXX: We have to support tearing 2MB pages down into 4k pages if * needed here. */ /* Pages that aren't mapped aren't supported. */ for (tmpva = base; tmpva < (base + size); ) { pde = pmap_pde(kernel_pmap, tmpva); if (*pde == 0) return (EINVAL); if (*pde & PG_PS) { /* Handle 2MB pages that are completely contained. */ if (size >= NBPDR) { tmpva += NBPDR; continue; } return (EINVAL); } pte = vtopte(va); if (*pte == 0) return (EINVAL); tmpva += PAGE_SIZE; } /* * Ok, all the pages exist, so run through them updating their * cache mode. */ for (tmpva = base; size > 0; ) { pde = pmap_pde(kernel_pmap, tmpva); if (*pde & PG_PS) { pmap_pde_attr(tmpva, mode); tmpva += NBPDR; size -= NBPDR; } else { pmap_pte_attr(tmpva, mode); tmpva += PAGE_SIZE; size -= PAGE_SIZE; } } /* * Flush CPU caches to make sure any data isn't cached that shouldn't * be, etc. */ pmap_invalidate_range(kernel_pmap, base, tmpva); pmap_invalidate_cache(); return (0); } /* * perform the pmap work for mincore */ int pmap_mincore(pmap, addr) pmap_t pmap; vm_offset_t addr; { pt_entry_t *ptep, pte; vm_page_t m; int val = 0; PMAP_LOCK(pmap); ptep = pmap_pte(pmap, addr); pte = (ptep != NULL) ? 
*ptep : 0; PMAP_UNLOCK(pmap); if (pte != 0) { vm_paddr_t pa; val = MINCORE_INCORE; if ((pte & PG_MANAGED) == 0) return val; pa = pte & PG_FRAME; m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if (pte & PG_M) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; else { /* * Modified by someone else */ vm_page_lock_queues(); if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; vm_page_unlock_queues(); } /* * Referenced by us */ if (pte & PG_A) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; else { /* * Referenced by someone else */ vm_page_lock_queues(); if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } vm_page_unlock_queues(); } } return val; } void pmap_activate(struct thread *td) { pmap_t pmap, oldpmap; u_int64_t cr3; critical_enter(); pmap = vmspace_pmap(td->td_proc->p_vmspace); oldpmap = PCPU_GET(curpmap); #ifdef SMP if (oldpmap) /* XXX FIXME */ atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask)); atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); #else if (oldpmap) /* XXX FIXME */ oldpmap->pm_active &= ~PCPU_GET(cpumask); pmap->pm_active |= PCPU_GET(cpumask); #endif cr3 = vtophys(pmap->pm_pml4); td->td_pcb->pcb_cr3 = cr3; load_cr3(cr3); critical_exit(); } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) { return addr; } addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); return addr; } Index: stable/6/sys/arm/arm/pmap.c =================================================================== --- stable/6/sys/arm/arm/pmap.c (revision 173366) +++ stable/6/sys/arm/arm/pmap.c (revision 173367) @@ -1,4894 +1,4892 @@ /* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */ /*- * Copyright 2004 Olivier Houchard. * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2002-2003 Wasabi Systems, Inc. * Copyright (c) 2001 Richard Earnshaw * Copyright (c) 2001-2002 Christopher Gilbert * All rights reserved. * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * * RiscBSD kernel project * * pmap.c * * Machine dependant vm stuff * * Created : 20/09/94 */ /* * Special compilation symbols * PMAP_DEBUG - Build in pmap_debug_level code */ /* Include header files */ #include "opt_vm.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PMAP_DEBUG #define PDEBUG(_lev_,_stat_) \ if (pmap_debug_level >= (_lev_)) \ ((_stat_)) #define dprintf printf int pmap_debug_level = 0; #define PMAP_INLINE #else /* PMAP_DEBUG */ #define PDEBUG(_lev_,_stat_) /* Nothing */ #define dprintf(x, arg...) 
#define PMAP_INLINE __inline #endif /* PMAP_DEBUG */ extern struct pv_addr systempage; /* * Internal function prototypes */ static void pmap_free_pv_entry (pv_entry_t); static pv_entry_t pmap_get_pv_entry(void); static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); static void pmap_vac_me_harder(struct vm_page *, pmap_t, vm_offset_t); static void pmap_vac_me_kpmap(struct vm_page *, pmap_t, vm_offset_t); static void pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t); static void pmap_alloc_l1(pmap_t); static void pmap_free_l1(pmap_t); static void pmap_use_l1(pmap_t); static int pmap_clearbit(struct vm_page *, u_int); static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); static vm_offset_t kernel_pt_lookup(vm_paddr_t); static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); vm_offset_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ vm_offset_t pmap_curmaxkvaddr; extern void *end; vm_offset_t kernel_vm_end = 0; struct pmap kernel_pmap_store; pmap_t kernel_pmap; static pt_entry_t *csrc_pte, *cdst_pte; static vm_offset_t csrcp, cdstp; static struct mtx cmtx; static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); /* * These routines are called when the CPU type is identified to set up * the PTE prototypes, cache modes, etc. * * The variables are always here, just in case LKMs need to reference * them (though, they shouldn't). */ pt_entry_t pte_l1_s_cache_mode; pt_entry_t pte_l1_s_cache_mode_pt; pt_entry_t pte_l1_s_cache_mask; pt_entry_t pte_l2_l_cache_mode; pt_entry_t pte_l2_l_cache_mode_pt; pt_entry_t pte_l2_l_cache_mask; pt_entry_t pte_l2_s_cache_mode; pt_entry_t pte_l2_s_cache_mode_pt; pt_entry_t pte_l2_s_cache_mask; pt_entry_t pte_l2_s_prot_u; pt_entry_t pte_l2_s_prot_w; pt_entry_t pte_l2_s_prot_mask; pt_entry_t pte_l1_s_proto; pt_entry_t pte_l1_c_proto; pt_entry_t pte_l2_s_proto; void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); void (*pmap_zero_page_func)(vm_paddr_t, int, int); /* * Which pmap is currently 'live' in the cache * * XXXSCW: Fix for SMP ... */ union pmap_cache_state *pmap_cache_state; LIST_HEAD(pmaplist, pmap); struct pmaplist allpmaps; /* static pt_entry_t *msgbufmap;*/ struct msgbuf *msgbufp = 0; extern void bcopy_page(vm_offset_t, vm_offset_t); extern void bzero_page(vm_offset_t); char *_tmppt; /* * Metadata for L1 translation tables. */ struct l1_ttable { /* Entry on the L1 Table list */ SLIST_ENTRY(l1_ttable) l1_link; /* Entry on the L1 Least Recently Used list */ TAILQ_ENTRY(l1_ttable) l1_lru; /* Track how many domains are allocated from this L1 */ volatile u_int l1_domain_use_count; /* * A free-list of domain numbers for this L1. * We avoid using ffs() and a bitmap to track domains since ffs() * is slow on ARM. */ u_int8_t l1_domain_first; u_int8_t l1_domain_free[PMAP_DOMAINS]; /* Physical address of this L1 page table */ vm_paddr_t l1_physaddr; /* KVA of this L1 page table */ pd_entry_t *l1_kva; }; /* * Convert a virtual address into its L1 table index. That is, the * index used to locate the L2 descriptor table pointer in an L1 table. * This is basically used to index l1->l1_kva[]. * * Each L2 descriptor table represents 1MB of VA space. 
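 * For example, assuming the usual ARM L1_S_SHIFT of 20 (1MB sections),
 * L1_IDX(0x00345678) == 3, i.e. that va falls in the fourth 1MB slot of
 * the L1 table.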
*/ #define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) /* * L1 Page Tables are tracked using a Least Recently Used list. * - New L1s are allocated from the HEAD. * - Freed L1s are added to the TAIL. * - Recently accessed L1s (where an 'access' is some change to one of * the userland pmaps which owns this L1) are moved to the TAIL. */ static TAILQ_HEAD(, l1_ttable) l1_lru_list; /* * A list of all L1 tables */ static SLIST_HEAD(, l1_ttable) l1_list; static struct mtx l1_lru_lock; /* * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. * * This is normally 16MB worth of L2 page descriptors for any given pmap. * Reference counts are maintained for L2 descriptors so they can be * freed when empty. */ struct l2_dtable { /* The number of L2 page descriptors allocated to this l2_dtable */ u_int l2_occupancy; /* List of L2 page descriptors */ struct l2_bucket { pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ vm_paddr_t l2b_phys; /* Physical address of same */ u_short l2b_l1idx; /* This L2 table's L1 index */ u_short l2b_occupancy; /* How many active descriptors */ } l2_bucket[L2_BUCKET_SIZE]; }; /* pmap_kenter_internal flags */ #define KENTER_CACHE 0x1 #define KENTER_USER 0x2 /* * Given an L1 table index, calculate the corresponding l2_dtable index * and bucket index within the l2_dtable. */ #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ (L2_SIZE - 1)) #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) /* * Given a virtual address, this macro returns the * virtual address required to drop into the next L2 bucket. */ #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) /* * L2 allocation. */ #define pmap_alloc_l2_dtable() \ (void*)uma_zalloc(l2table_zone, M_NOWAIT) #define pmap_free_l2_dtable(l2) \ uma_zfree(l2table_zone, l2) /* * We try to map the page tables write-through, if possible. However, not * all CPUs have a write-through cache mode, so on those we have to sync * the cache when we frob page tables. * * We try to evaluate this at compile time, if possible. However, it's * not always possible to do that, hence this run-time var. */ int pmap_needs_pte_sync; /* * Macro to determine if a mapping might be resident in the * instruction cache and/or TLB */ #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) /* * Macro to determine if a mapping might be resident in the * data cache and/or TLB */ #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) /* * Data for the pv entry allocation mechanism */ #define MINPV 2048 #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #define pmap_is_current(pm) ((pm) == pmap_kernel() || \ curproc->p_vmspace->vm_map.pmap == (pm)) static uma_zone_t pvzone; uma_zone_t l2zone; static uma_zone_t l2table_zone; static vm_offset_t pmap_kernel_l2dtable_kva; static vm_offset_t pmap_kernel_l2ptp_kva; static vm_paddr_t pmap_kernel_l2ptp_phys; static struct vm_object pvzone_obj; static struct vm_object l2zone_obj; static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0; int pmap_pagedaemon_waken = 0; /* * This list exists for the benefit of pmap_map_chunk(). It keeps track * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can * find them as necessary. * * Note that the data on this list MUST remain valid after initarm() returns, * as pmap_bootstrap() uses it to construct L2 table metadata.
*/ SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); static void pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) { int i; l1->l1_kva = l1pt; l1->l1_domain_use_count = 0; l1->l1_domain_first = 0; for (i = 0; i < PMAP_DOMAINS; i++) l1->l1_domain_free[i] = i + 1; /* * Copy the kernel's L1 entries to each new L1. */ if (l1pt != pmap_kernel()->pm_l1->l1_kva) memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0) panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); SLIST_INSERT_HEAD(&l1_list, l1, l1_link); TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); } static vm_offset_t kernel_pt_lookup(vm_paddr_t pa) { struct pv_addr *pv; SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { #ifndef ARM32_NEW_VM_LAYOUT if (pv->pv_pa == (pa & ~PAGE_MASK)) { return (pv->pv_va | (pa & PAGE_MASK)); } #else if (pv->pv_pa == pa) return (pv->pv_va); #endif } return (0); } #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 void pmap_pte_init_generic(void) { pte_l1_s_cache_mode = L1_S_B|L1_S_C; pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; pte_l2_l_cache_mode = L2_B|L2_C; pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; pte_l2_s_cache_mode = L2_B|L2_C; pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; /* * If we have a write-through cache, set B and C. If * we have a write-back cache, then we assume setting * only C will make those pages write-through. */ if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; pte_l2_l_cache_mode_pt = L2_B|L2_C; pte_l2_s_cache_mode_pt = L2_B|L2_C; } else { pte_l1_s_cache_mode_pt = L1_S_C; pte_l2_l_cache_mode_pt = L2_C; pte_l2_s_cache_mode_pt = L2_C; } pte_l2_s_prot_u = L2_S_PROT_U_generic; pte_l2_s_prot_w = L2_S_PROT_W_generic; pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; pte_l1_s_proto = L1_S_PROTO_generic; pte_l1_c_proto = L1_C_PROTO_generic; pte_l2_s_proto = L2_S_PROTO_generic; pmap_copy_page_func = pmap_copy_page_generic; pmap_zero_page_func = pmap_zero_page_generic; } #if defined(CPU_ARM8) void pmap_pte_init_arm8(void) { /* * ARM8 is compatible with generic, but we need to use * the page tables uncached. */ pmap_pte_init_generic(); pte_l1_s_cache_mode_pt = 0; pte_l2_l_cache_mode_pt = 0; pte_l2_s_cache_mode_pt = 0; } #endif /* CPU_ARM8 */ #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH) void pmap_pte_init_arm9(void) { /* * ARM9 is compatible with generic, but we want to use * write-through caching for now. */ pmap_pte_init_generic(); pte_l1_s_cache_mode = L1_S_C; pte_l2_l_cache_mode = L2_C; pte_l2_s_cache_mode = L2_C; pte_l1_s_cache_mode_pt = L1_S_C; pte_l2_l_cache_mode_pt = L2_C; pte_l2_s_cache_mode_pt = L2_C; } #endif /* CPU_ARM9 */ #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ #if defined(CPU_ARM10) void pmap_pte_init_arm10(void) { /* * ARM10 is compatible with generic, but we want to use * write-through caching for now. */ pmap_pte_init_generic(); pte_l1_s_cache_mode = L1_S_B | L1_S_C; pte_l2_l_cache_mode = L2_B | L2_C; pte_l2_s_cache_mode = L2_B | L2_C; pte_l1_s_cache_mode_pt = L1_S_C; pte_l2_l_cache_mode_pt = L2_C; pte_l2_s_cache_mode_pt = L2_C; } #endif /* CPU_ARM10 */ #if ARM_MMU_SA1 == 1 void pmap_pte_init_sa1(void) { /* * The StrongARM SA-1 cache does not have a write-through * mode. So, do the generic initialization, then reset * the page table cache mode to B=1,C=1, and note that * the PTEs need to be sync'd. 
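 * (That is why pmap_needs_pte_sync is set to 1 below: every PTE store on
 * SA-1 must be followed by an explicit cache clean so that the table walk
 * hardware sees it.)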
*/ pmap_pte_init_generic(); pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; pte_l2_l_cache_mode_pt = L2_B|L2_C; pte_l2_s_cache_mode_pt = L2_B|L2_C; pmap_needs_pte_sync = 1; } #endif /* ARM_MMU_SA1 == 1*/ #if ARM_MMU_XSCALE == 1 #if (ARM_NMMUS > 1) static u_int xscale_use_minidata; #endif void pmap_pte_init_xscale(void) { uint32_t auxctl; int write_through = 0; pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; pte_l2_l_cache_mode = L2_B|L2_C; pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; pte_l2_s_cache_mode = L2_B|L2_C; pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; pte_l1_s_cache_mode_pt = L1_S_C; pte_l2_l_cache_mode_pt = L2_C; pte_l2_s_cache_mode_pt = L2_C; #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE /* * The XScale core has an enhanced mode where writes that * miss the cache cause a cache line to be allocated. This * is significantly faster than the traditional, write-through * behavior of this case. */ pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ #ifdef XSCALE_CACHE_WRITE_THROUGH /* * Some versions of the XScale core have various bugs in * their cache units, the work-around for which is to run * the cache in write-through mode. Unfortunately, this * has a major (negative) impact on performance. So, we * go ahead and run fast-and-loose, in the hopes that we * don't line up the planets in a way that will trip the * bugs. * * However, we give you the option to be slow-but-correct. */ write_through = 1; #elif defined(XSCALE_CACHE_WRITE_BACK) /* force write back cache mode */ write_through = 0; #elif defined(CPU_XSCALE_PXA2X0) /* * Intel PXA2[15]0 processors are known to have a bug in * write-back cache on revision 4 and earlier (stepping * A[01] and B[012]). Fixed for C0 and later. */ { uint32_t id, type; id = cpufunc_id(); type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { if ((id & CPU_ID_REVISION_MASK) < 5) { /* write through for stepping A0-1 and B0-2 */ write_through = 1; } } } #endif /* XSCALE_CACHE_WRITE_THROUGH */ if (write_through) { pte_l1_s_cache_mode = L1_S_C; pte_l2_l_cache_mode = L2_C; pte_l2_s_cache_mode = L2_C; } #if (ARM_NMMUS > 1) xscale_use_minidata = 1; #endif pte_l2_s_prot_u = L2_S_PROT_U_xscale; pte_l2_s_prot_w = L2_S_PROT_W_xscale; pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; pte_l1_s_proto = L1_S_PROTO_xscale; pte_l1_c_proto = L1_C_PROTO_xscale; pte_l2_s_proto = L2_S_PROTO_xscale; pmap_copy_page_func = pmap_copy_page_xscale; pmap_zero_page_func = pmap_zero_page_xscale; /* * Disable ECC protection of page table access, for now. */ __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); auxctl &= ~XSCALE_AUXCTL_P; __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); } /* * xscale_setup_minidata: * * Set up the mini-data cache clean area. We require the * caller to allocate the right amount of physically and * virtually contiguous space. */ extern vm_offset_t xscale_minidata_clean_addr; extern vm_size_t xscale_minidata_clean_size; /* already initialized */ void xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) { pd_entry_t *pde = (pd_entry_t *) l1pt; pt_entry_t *pte; vm_size_t size; uint32_t auxctl; xscale_minidata_clean_addr = va; /* Round it to page size. 
*/ size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; for (; size != 0; va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { #ifndef ARM32_NEW_VM_LAYOUT pte = (pt_entry_t *) kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); #else pte = (pt_entry_t *) kernel_pt_lookup( pde[L1_IDX(va)] & L1_C_ADDR_MASK); #endif if (pte == NULL) panic("xscale_setup_minidata: can't find L2 table for " "VA 0x%08x", (u_int32_t) va); #ifndef ARM32_NEW_VM_LAYOUT pte[(va >> PAGE_SHIFT) & 0x3ff] = #else pte[l2pte_index(va)] = #endif L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); } /* * Configure the mini-data cache for write-back with * read/write-allocate. * * NOTE: In order to reconfigure the mini-data cache, we must * make sure it contains no valid data! In order to do that, * we must issue a global data cache invalidate command! * * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! * THIS IS VERY IMPORTANT! */ /* Invalidate data and mini-data. */ __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); } #endif /* * Allocate an L1 translation table for the specified pmap. * This is called at pmap creation time. */ static void pmap_alloc_l1(pmap_t pm) { struct l1_ttable *l1; u_int8_t domain; /* * Remove the L1 at the head of the LRU list */ mtx_lock(&l1_lru_lock); l1 = TAILQ_FIRST(&l1_lru_list); TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); /* * Pick the first available domain number, and update * the link to the next number. */ domain = l1->l1_domain_first; l1->l1_domain_first = l1->l1_domain_free[domain]; /* * If there are still free domain numbers in this L1, * put it back on the TAIL of the LRU list. */ if (++l1->l1_domain_use_count < PMAP_DOMAINS) TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); mtx_unlock(&l1_lru_lock); /* * Fix up the relevant bits in the pmap structure */ pm->pm_l1 = l1; pm->pm_domain = domain; } /* * Free an L1 translation table. * This is called at pmap destruction time. */ static void pmap_free_l1(pmap_t pm) { struct l1_ttable *l1 = pm->pm_l1; mtx_lock(&l1_lru_lock); /* * If this L1 is currently on the LRU list, remove it. */ if (l1->l1_domain_use_count < PMAP_DOMAINS) TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); /* * Free up the domain number which was allocated to the pmap */ l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; l1->l1_domain_first = pm->pm_domain; l1->l1_domain_use_count--; /* * The L1 now must have at least 1 free domain, so add * it back to the LRU list. If the use count is zero, * put it at the head of the list, otherwise it goes * to the tail. */ if (l1->l1_domain_use_count == 0) { TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); } else TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); mtx_unlock(&l1_lru_lock); } static PMAP_INLINE void pmap_use_l1(pmap_t pm) { struct l1_ttable *l1; /* * Do nothing if we're in interrupt context. * Access to an L1 by the kernel pmap must not affect * the LRU list. 
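 * (pmap_alloc_l1() steals L1s from the head of the LRU list, so moving a
 * recently used L1 to the tail below makes it the last candidate to have
 * its domains reallocated.)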
*/ if (pm == pmap_kernel()) return; l1 = pm->pm_l1; /* * If the L1 is not currently on the LRU list, just return */ if (l1->l1_domain_use_count == PMAP_DOMAINS) return; mtx_lock(&l1_lru_lock); /* * Check the use count again, now that we've acquired the lock */ if (l1->l1_domain_use_count == PMAP_DOMAINS) { mtx_unlock(&l1_lru_lock); return; } /* * Move the L1 to the back of the LRU list */ TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); mtx_unlock(&l1_lru_lock); } /* * Returns a pointer to the L2 bucket associated with the specified pmap * and VA, or NULL if no L2 bucket exists for the address. */ static PMAP_INLINE struct l2_bucket * pmap_get_l2_bucket(pmap_t pm, vm_offset_t va) { struct l2_dtable *l2; struct l2_bucket *l2b; u_short l1idx; l1idx = L1_IDX(va); if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) return (NULL); return (l2b); } /* * Returns a pointer to the L2 bucket associated with the specified pmap * and VA. * * If no L2 bucket exists, perform the necessary allocations to put an L2 * bucket/page table in place. * * Note that if a new L2 bucket/page was allocated, the caller *must* * increment the bucket occupancy counter appropriately *before* * releasing the pmap's lock to ensure no other thread or cpu deallocates * the bucket/page in the meantime. */ static struct l2_bucket * pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) { struct l2_dtable *l2; struct l2_bucket *l2b; u_short l1idx; l1idx = L1_IDX(va); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { /* * No mapping at this address, as there is * no entry in the L1 table. * Need to allocate a new l2_dtable. */ again_l2table: vm_page_unlock_queues(); if ((l2 = pmap_alloc_l2_dtable()) == NULL) { vm_page_lock_queues(); return (NULL); } vm_page_lock_queues(); if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { vm_page_unlock_queues(); uma_zfree(l2table_zone, l2); vm_page_lock_queues(); l2 = pm->pm_l2[L2_IDX(l1idx)]; if (l2 == NULL) goto again_l2table; /* * Someone already allocated the l2_dtable while * we were doing the same. */ } else { bzero(l2, sizeof(*l2)); /* * Link it into the parent pmap */ pm->pm_l2[L2_IDX(l1idx)] = l2; } } l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; /* * Fetch pointer to the L2 page table associated with the address. */ if (l2b->l2b_kva == NULL) { pt_entry_t *ptep; /* * No L2 page table has been allocated. Chances are, this * is because we just allocated the l2_dtable, above. */ again_ptep: vm_page_unlock_queues(); ptep = (void*)uma_zalloc(l2zone, M_NOWAIT); vm_page_lock_queues(); if (l2b->l2b_kva != 0) { /* We lost the race. */ vm_page_unlock_queues(); uma_zfree(l2zone, ptep); vm_page_lock_queues(); if (l2b->l2b_kva == 0) goto again_ptep; return (l2b); } l2b->l2b_phys = vtophys(ptep); if (ptep == NULL) { /* * Oops, no more L2 page tables available at this * time. We may need to deallocate the l2_dtable * if we allocated a new one above. */ if (l2->l2_occupancy == 0) { pm->pm_l2[L2_IDX(l1idx)] = NULL; pmap_free_l2_dtable(l2); } return (NULL); } l2->l2_occupancy++; l2b->l2b_kva = ptep; l2b->l2b_l1idx = l1idx; } return (l2b); } static PMAP_INLINE void #ifndef PMAP_INCLUDE_PTE_SYNC pmap_free_l2_ptp(pt_entry_t *l2) #else pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) #endif { #ifdef PMAP_INCLUDE_PTE_SYNC /* * Note: With a write-back cache, we may need to sync this * L2 table before re-using it. 
* This is because it may have belonged to a non-current * pmap, in which case the cache syncs would have been * skipped when the pages were being unmapped. If the * L2 table were then to be immediately re-allocated to * the *current* pmap, it may well contain stale mappings * which have not yet been cleared by a cache write-back * and so would still be visible to the mmu. */ if (need_sync) PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); #endif uma_zfree(l2zone, l2); } /* * One or more mappings in the specified L2 descriptor table have just been * invalidated. * * Garbage collect the metadata and descriptor table itself if necessary. * * The pmap lock must be acquired when this is called (not necessary * for the kernel pmap). */ static void pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) { struct l2_dtable *l2; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep; u_short l1idx; /* * Update the bucket's reference count according to how many * PTEs the caller has just invalidated. */ l2b->l2b_occupancy -= count; /* * Note: * * Level 2 page tables allocated to the kernel pmap are never freed * as that would require checking all Level 1 page tables and * removing any references to the Level 2 page table. See also the * comment elsewhere about never freeing bootstrap L2 descriptors. * * We make do with just invalidating the mapping in the L2 table. * * This isn't really a big deal in practice and, in fact, leads * to a performance win over time as we don't need to continually * alloc/free. */ if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) return; /* * There are no more valid mappings in this level 2 page table. * Go ahead and NULL-out the pointer in the bucket, then * free the page table. */ l1idx = l2b->l2b_l1idx; ptep = l2b->l2b_kva; l2b->l2b_kva = NULL; pl1pd = &pm->pm_l1->l1_kva[l1idx]; /* * If the L1 slot matches the pmap's domain * number, then invalidate it. */ l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { *pl1pd = 0; PTE_SYNC(pl1pd); } /* * Release the L2 descriptor table back to the pool cache. */ #ifndef PMAP_INCLUDE_PTE_SYNC pmap_free_l2_ptp(ptep); #else pmap_free_l2_ptp(!pmap_is_current(pm), ptep); #endif /* * Update the reference count in the associated l2_dtable */ l2 = pm->pm_l2[L2_IDX(l1idx)]; if (--l2->l2_occupancy > 0) return; /* * There are no more valid mappings in any of the Level 1 * slots managed by this l2_dtable. Go ahead and NULL-out * the pointer in the parent pmap and free the l2_dtable. */ pm->pm_l2[L2_IDX(l1idx)] = NULL; pmap_free_l2_dtable(l2); } /* * Pool cache constructors for L2 descriptor tables, metadata and pmap * structures. */ static int pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) { #ifndef PMAP_INCLUDE_PTE_SYNC struct l2_bucket *l2b; pt_entry_t *ptep, pte; #ifdef ARM_USE_SMALL_ALLOC pd_entry_t *pde; #endif vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK; /* * The mappings for these page tables were initially made using * pmap_kenter() by the pool subsystem. Therefore, the cache- * mode will not be right for page table mappings. To avoid * polluting the pmap_kenter() code with a special case for * page tables, we simply fix up the cache-mode here if it's not * correct. 
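 * Concretely, the constructor below looks up the kernel PTE that maps the
 * new L2 table, rewrites its cache bits to pte_l2_s_cache_mode_pt
 * (write-through or uncached, depending on the CPU class) and flushes the
 * single TLB entry for that va.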
*/ #ifdef ARM_USE_SMALL_ALLOC pde = &kernel_pmap->pm_l1->l1_kva[L1_IDX(va)]; if (!l1pte_section_p(*pde)) { #endif l2b = pmap_get_l2_bucket(pmap_kernel(), va); ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { /* * Page tables must have the cache-mode set to * Write-Thru. */ *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; PTE_SYNC(ptep); cpu_tlb_flushD_SE(va); cpu_cpwait(); } #ifdef ARM_USE_SMALL_ALLOC } #endif #endif memset(mem, 0, L2_TABLE_SIZE_REAL); PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); return (0); } /* * A bunch of routines to conditionally flush the caches/TLB depending * on whether the specified pmap actually needs to be flushed at any * given time. */ static PMAP_INLINE void pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va) { if (pmap_is_current(pm)) cpu_tlb_flushID_SE(va); } static PMAP_INLINE void pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va) { if (pmap_is_current(pm)) cpu_tlb_flushD_SE(va); } static PMAP_INLINE void pmap_tlb_flushID(pmap_t pm) { if (pmap_is_current(pm)) cpu_tlb_flushID(); } static PMAP_INLINE void pmap_tlb_flushD(pmap_t pm) { if (pmap_is_current(pm)) cpu_tlb_flushD(); } static PMAP_INLINE void pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) { if (pmap_is_current(pm)) cpu_idcache_wbinv_range(va, len); } static PMAP_INLINE void pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, boolean_t rd_only) { if (pmap_is_current(pm)) { if (do_inv) { if (rd_only) cpu_dcache_inv_range(va, len); else cpu_dcache_wbinv_range(va, len); } else if (!rd_only) cpu_dcache_wb_range(va, len); } } static PMAP_INLINE void pmap_idcache_wbinv_all(pmap_t pm) { if (pmap_is_current(pm)) cpu_idcache_wbinv_all(); } static PMAP_INLINE void pmap_dcache_wbinv_all(pmap_t pm) { if (pmap_is_current(pm)) cpu_dcache_wbinv_all(); } /* * PTE_SYNC_CURRENT: * * Make sure the pte is written out to RAM. * We need to do this for one of two cases: * - We're dealing with the kernel pmap * - There is no pmap active in the cache/tlb. * - The specified pmap is 'active' in the cache/tlb. */ #ifdef PMAP_INCLUDE_PTE_SYNC #define PTE_SYNC_CURRENT(pm, ptep) \ do { \ if (PMAP_NEEDS_PTE_SYNC && \ pmap_is_current(pm)) \ PTE_SYNC(ptep); \ } while (/*CONSTCOND*/0) #else #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ #endif /* * Since we have a virtually indexed cache, we may need to inhibit caching if * there is more than one mapping and at least one of them is writable. * Since we purge the cache on every context switch, we only need to check for * other mappings within the same pmap, or kernel_pmap. * This function is also called when a page is unmapped, to possibly reenable * caching on any remaining mappings. 
* * The code implements the following logic, where: * * KW = # of kernel read/write pages * KR = # of kernel read only pages * UW = # of user read/write pages * UR = # of user read only pages * * KC = kernel mapping is cacheable * UC = user mapping is cacheable * * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 * +--------------------------------------------- * UW=0,UR=0 | --- KC=1 KC=1 KC=0 * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 */ static const int pmap_vac_flags[4][4] = { {-1, 0, 0, PVF_KNC}, {0, 0, PVF_NC, PVF_NC}, {0, PVF_NC, PVF_NC, PVF_NC}, {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} }; static PMAP_INLINE int pmap_get_vac_flags(const struct vm_page *pg) { int kidx, uidx; kidx = 0; if (pg->md.kro_mappings || pg->md.krw_mappings > 1) kidx |= 1; if (pg->md.krw_mappings) kidx |= 2; uidx = 0; if (pg->md.uro_mappings || pg->md.urw_mappings > 1) uidx |= 1; if (pg->md.urw_mappings) uidx |= 2; return (pmap_vac_flags[uidx][kidx]); } static __inline void pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va) { int nattr; nattr = pmap_get_vac_flags(pg); if (nattr < 0) { pg->md.pvh_attrs &= ~PVF_NC; return; } if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) { return; } if (pm == pmap_kernel()) pmap_vac_me_kpmap(pg, pm, va); else pmap_vac_me_user(pg, pm, va); pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr; } static void pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va) { u_int u_cacheable, u_entries; struct pv_entry *pv; pmap_t last_pmap = pm; /* * Pass one, see if there are both kernel and user pmaps for * this page. Calculate whether there are user-writable or * kernel-writable pages. */ u_cacheable = 0; TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) u_cacheable++; } u_entries = pg->md.urw_mappings + pg->md.uro_mappings; /* * We know we have just been updating a kernel entry, so if * all user pages are already cacheable, then there is nothing * further to do. */ if (pg->md.k_mappings == 0 && u_cacheable == u_entries) return; if (u_entries) { /* * Scan over the list again, for each entry, if it * might not be set correctly, call pmap_vac_me_user * to recalculate the settings. */ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { /* * We know kernel mappings will get set * correctly in other calls. We also know * that if the pmap is the same as last_pmap * then we've just handled this entry. */ if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) continue; /* * If there are kernel entries and this page * is writable but non-cacheable, then we can * skip this entry also. */ if (pg->md.k_mappings && (pv->pv_flags & (PVF_NC | PVF_WRITE)) == (PVF_NC | PVF_WRITE)) continue; /* * Similarly if there are no kernel-writable * entries and the page is already * read-only/cacheable. */ if (pg->md.krw_mappings == 0 && (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) continue; /* * For some of the remaining cases, we know * that we must recalculate, but for others we * can't tell if they are correct or not, so * we recalculate anyway. 
*/ pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0); } if (pg->md.k_mappings == 0) return; } pmap_vac_me_user(pg, pm, va); } static void pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va) { pmap_t kpmap = pmap_kernel(); struct pv_entry *pv, *npv; struct l2_bucket *l2b; pt_entry_t *ptep, pte; u_int entries = 0; u_int writable = 0; u_int cacheable_entries = 0; u_int kern_cacheable = 0; u_int other_writable = 0; /* * Count mappings and writable mappings in this pmap. * Include kernel mappings as part of our own. * Keep a pointer to the first one. */ npv = TAILQ_FIRST(&pg->md.pv_list); TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { /* Count mappings in the same pmap */ if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { if (entries++ == 0) npv = pv; /* Cacheable mappings */ if ((pv->pv_flags & PVF_NC) == 0) { cacheable_entries++; if (kpmap == pv->pv_pmap) kern_cacheable++; } /* Writable mappings */ if (pv->pv_flags & PVF_WRITE) ++writable; } else if (pv->pv_flags & PVF_WRITE) other_writable = 1; } /* * Enable or disable caching as necessary. * Note: the first entry might be part of the kernel pmap, * so we can't assume this is indicative of the state of the * other (maybe non-kpmap) entries. */ if ((entries > 1 && writable) || (entries > 0 && pm == kpmap && other_writable)) { if (cacheable_entries == 0) return; for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || (pv->pv_flags & PVF_NC)) continue; pv->pv_flags |= PVF_NC; l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; pte = *ptep & ~L2_S_CACHE_MASK; if ((va != pv->pv_va || pm != pv->pv_pmap) && l2pte_valid(pte)) { if (PV_BEEN_EXECD(pv->pv_flags)) { pmap_idcache_wbinv_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE); pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); } else if (PV_BEEN_REFD(pv->pv_flags)) { pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, (pv->pv_flags & PVF_WRITE) == 0); pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); } } *ptep = pte; PTE_SYNC_CURRENT(pv->pv_pmap, ptep); } cpu_cpwait(); } else if (entries > cacheable_entries) { /* * Turn cacheing back on for some pages. If it is a kernel * page, only do so if there are no other writable pages. */ for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && (kpmap != pv->pv_pmap || other_writable))) continue; pv->pv_flags &= ~PVF_NC; l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; if (l2pte_valid(pte)) { if (PV_BEEN_EXECD(pv->pv_flags)) { pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); } else if (PV_BEEN_REFD(pv->pv_flags)) { pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); } } *ptep = pte; PTE_SYNC_CURRENT(pv->pv_pmap, ptep); } } } /* * Modify pte bits for all ptes corresponding to the given physical address. * We use `maskbits' rather than `clearbits' because we're always passing * constants and the latter would require an extra inversion at run-time. 
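 * Typical invocations would be of the form (sketch; the callers live
 * elsewhere in this file):
 *
 *	pmap_clearbit(pg, PVF_WRITE);	revoke write permission
 *	pmap_clearbit(pg, PVF_MOD);	clear the modified attribute
 *	pmap_clearbit(pg, PVF_REF);	clear referenced, forcing a fault
 *					on the next access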
*/ static int pmap_clearbit(struct vm_page *pg, u_int maskbits) { struct l2_bucket *l2b; struct pv_entry *pv; pt_entry_t *ptep, npte, opte; pmap_t pm; vm_offset_t va; u_int oflags; int count = 0; #if 0 PMAP_HEAD_TO_MAP_LOCK(); simple_lock(&pg->mdpage.pvh_slock); #endif /* * Clear saved attributes (modify, reference) */ pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); if (TAILQ_EMPTY(&pg->md.pv_list)) { #if 0 simple_unlock(&pg->mdpage.pvh_slock); PMAP_HEAD_TO_MAP_UNLOCK(); #endif return (0); } /* * Loop over all current mappings setting/clearing as appropos */ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { va = pv->pv_va; pm = pv->pv_pmap; oflags = pv->pv_flags; pv->pv_flags &= ~maskbits; #if 0 pmap_acquire_pmap_lock(pm); #endif l2b = pmap_get_l2_bucket(pm, va); ptep = &l2b->l2b_kva[l2pte_index(va)]; npte = opte = *ptep; if (maskbits & (PVF_WRITE|PVF_MOD)) { if ((pv->pv_flags & PVF_NC)) { /* * Entry is not cacheable: * * Don't turn caching on again if this is a * modified emulation. This would be * inconsitent with the settings created by * pmap_vac_me_harder(). Otherwise, it's safe * to re-enable cacheing. * * There's no need to call pmap_vac_me_harder() * here: all pages are losing their write * permission. */ if (maskbits & PVF_WRITE) { npte |= pte_l2_s_cache_mode; pv->pv_flags &= ~PVF_NC; } } else if (opte & L2_S_PROT_W) { vm_page_dirty(pg); /* * Entry is writable/cacheable: check if pmap * is current if it is flush it, otherwise it * won't be in the cache */ if (PV_BEEN_EXECD(oflags)) pmap_idcache_wbinv_range(pm, pv->pv_va, PAGE_SIZE); else if (PV_BEEN_REFD(oflags)) pmap_dcache_wb_range(pm, pv->pv_va, PAGE_SIZE, (maskbits & PVF_REF) ? TRUE : FALSE, FALSE); } /* make the pte read only */ npte &= ~L2_S_PROT_W; if (maskbits & PVF_WRITE) { /* * Keep alias accounting up to date */ if (pv->pv_pmap == pmap_kernel()) { if (oflags & PVF_WRITE) { pg->md.krw_mappings--; pg->md.kro_mappings++; } } else if (oflags & PVF_WRITE) { pg->md.urw_mappings--; pg->md.uro_mappings++; } } } if (maskbits & PVF_REF) { if ((pv->pv_flags & PVF_NC) == 0 && (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { /* * Check npte here; we may have already * done the wbinv above, and the validity * of the PTE is the same for opte and * npte. */ if (npte & L2_S_PROT_W) { if (PV_BEEN_EXECD(oflags)) pmap_idcache_wbinv_range(pm, pv->pv_va, PAGE_SIZE); else if (PV_BEEN_REFD(oflags)) pmap_dcache_wb_range(pm, pv->pv_va, PAGE_SIZE, TRUE, FALSE); } else if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { /* XXXJRT need idcache_inv_range */ if (PV_BEEN_EXECD(oflags)) pmap_idcache_wbinv_range(pm, pv->pv_va, PAGE_SIZE); else if (PV_BEEN_REFD(oflags)) pmap_dcache_wb_range(pm, pv->pv_va, PAGE_SIZE, TRUE, TRUE); } } /* * Make the PTE invalid so that we will take a * page fault the next time the mapping is * referenced. */ npte &= ~L2_TYPE_MASK; npte |= L2_TYPE_INV; } if (npte != opte) { count++; *ptep = npte; PTE_SYNC(ptep); /* Flush the TLB entry if a current pmap. 
			 */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}
#if 0
		pmap_release_pmap_lock(pm);
#endif
	}
#if 0
	simple_unlock(&pg->mdpage.pvh_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();
#endif
	if (maskbits & PVF_WRITE)
		vm_page_flag_clear(pg, PG_WRITEABLE);
	return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;
#if 0
	mtx_lock(&pg->md.pvh_mtx);
#endif
	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pm == pmap_kernel()) {
		if (flags & PVF_WRITE)
			pg->md.krw_mappings++;
		else
			pg->md.kro_mappings++;
	} else if (flags & PVF_WRITE)
		pg->md.urw_mappings++;
	else
		pg->md.uro_mappings++;
	pg->md.pv_list_count++;
#if 0
	mtx_unlock(&pg->md.pvh_mtx);
#endif
	if (pve->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
	vm_page_flag_set(pg, PG_REFERENCED);
}

/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
		if (pm == pv->pv_pmap && va == pv->pv_va)
			break;
	return (pv);
}

/*
 * vector_page_setprot:
 *
 * Manipulate the protection of the vector page.
*/ void vector_page_setprot(int prot) { struct l2_bucket *l2b; pt_entry_t *ptep; l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); PTE_SYNC(ptep); cpu_tlb_flushD_SE(vector_page); cpu_cpwait(); } /* * pmap_remove_pv: try to remove a mapping from a pv_list * * => caller should hold proper lock on pmap_main_lock * => pmap should be locked * => caller should hold lock on vm_page [so that attrs can be adjusted] * => caller should adjust ptp's wire_count and free PTP if needed * => caller should NOT adjust pmap's wire_count * => we return the removed pve */ static void pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) { TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); if (pve->pv_flags & PVF_WIRED) --pm->pm_stats.wired_count; pg->md.pv_list_count--; if (pg->md.pvh_attrs & PVF_MOD) vm_page_dirty(pg); if (pm == pmap_kernel()) { if (pve->pv_flags & PVF_WRITE) pg->md.krw_mappings--; else pg->md.kro_mappings--; } else if (pve->pv_flags & PVF_WRITE) pg->md.urw_mappings--; else pg->md.uro_mappings--; if (TAILQ_FIRST(&pg->md.pv_list) == NULL || (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) { pg->md.pvh_attrs &= ~PVF_MOD; if (TAILQ_FIRST(&pg->md.pv_list) == NULL) pg->md.pvh_attrs &= ~PVF_REF; vm_page_flag_clear(pg, PG_WRITEABLE); } if (TAILQ_FIRST(&pg->md.pv_list)) vm_page_flag_set(pg, PG_REFERENCED); if (pve->pv_flags & PVF_WRITE) pmap_vac_me_harder(pg, pm, 0); } static struct pv_entry * pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) { struct pv_entry *pve; pve = TAILQ_FIRST(&pg->md.pv_list); while (pve) { if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ pmap_nuke_pv(pg, pm, pve); break; } pve = TAILQ_NEXT(pve, pv_list); } return(pve); /* return removed pve */ } /* * * pmap_modify_pv: Update pv flags * * => caller should hold lock on vm_page [so that attrs can be adjusted] * => caller should NOT adjust pmap's wire_count * => caller must call pmap_vac_me_harder() if writable status of a page * may have changed. * => we return the old flags * * Modify a physical-virtual mapping in the pv table */ static u_int pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, u_int clr_mask, u_int set_mask) { struct pv_entry *npv; u_int flags, oflags; if ((npv = pmap_find_pv(pg, pm, va)) == NULL) return (0); /* * There is at least one VA mapping this page. 
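	 * Update the page's saved REF/MOD attributes first; the
	 * per-mapping flags and the wired/writable accounting are
	 * adjusted below.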
*/ if (clr_mask & (PVF_REF | PVF_MOD)) pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); oflags = npv->pv_flags; npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; if ((flags ^ oflags) & PVF_WIRED) { if (flags & PVF_WIRED) ++pm->pm_stats.wired_count; else --pm->pm_stats.wired_count; } if ((flags ^ oflags) & PVF_WRITE) { if (pm == pmap_kernel()) { if (flags & PVF_WRITE) { pg->md.krw_mappings++; pg->md.kro_mappings--; } else { pg->md.kro_mappings++; pg->md.krw_mappings--; } } else if (flags & PVF_WRITE) { pg->md.urw_mappings++; pg->md.uro_mappings--; } else { pg->md.uro_mappings++; pg->md.urw_mappings--; } if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) { pg->md.pvh_attrs &= ~PVF_MOD; vm_page_flag_clear(pg, PG_WRITEABLE); } pmap_vac_me_harder(pg, pm, 0); } return (oflags); } /* Function to set the debug level of the pmap code */ #ifdef PMAP_DEBUG void pmap_debug(int level) { pmap_debug_level = level; dprintf("pmap_debug: level=%d\n", pmap_debug_level); } #endif /* PMAP_DEBUG */ void pmap_pinit0(struct pmap *pmap) { PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n", (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir); bcopy(kernel_pmap, pmap, sizeof(*pmap)); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { PDEBUG(1, printf("pmap_init: phys_start = %08x\n")); /* * init the pv free list */ pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); uma_prealloc(pvzone, MINPV); /* * Now it is safe to enable pv_table recording. */ PDEBUG(1, printf("pmap_init: done!\n")); } int pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) { struct l2_dtable *l2; struct l2_bucket *l2b; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep, pte; vm_paddr_t pa; u_int l1idx; int rv = 0; #if 0 PMAP_MAP_TO_HEAD_LOCK(); pmap_acquire_pmap_lock(pm); #endif l1idx = L1_IDX(va); /* * If there is no l2_dtable for this address, then the process * has no business accessing it. * * Note: This will catch userland processes trying to access * kernel addresses. */ l2 = pm->pm_l2[L2_IDX(l1idx)]; if (l2 == NULL) goto out; /* * Likewise if there is no L2 descriptor table */ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; if (l2b->l2b_kva == NULL) goto out; /* * Check the PTE itself. */ ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; if (pte == 0) goto out; /* * Catch a userland access to the vector page mapped at 0x0 */ if (user && (pte & L2_S_PROT_U) == 0) goto out; if (va == vector_page) goto out; pa = l2pte_pa(pte); if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { /* * This looks like a good candidate for "page modified" * emulation... */ struct pv_entry *pv; struct vm_page *pg; /* Extract the physical address of the page */ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { goto out; } /* Get the current flags for this page. */ pv = pmap_find_pv(pg, pm, va); if (pv == NULL) { goto out; } /* * Do the flags say this page is writable? If not then it * is a genuine write fault. If yes then the write fault is * our fault as we did not reflect the write access in the * PTE. 
Now we know a write has occurred we can correct this * and also set the modified bit */ if ((pv->pv_flags & PVF_WRITE) == 0) { goto out; } pg->md.pvh_attrs |= PVF_REF | PVF_MOD; vm_page_dirty(pg); pv->pv_flags |= PVF_REF | PVF_MOD; /* * Re-enable write permissions for the page. No need to call * pmap_vac_me_harder(), since this is just a * modified-emulation fault, and the PVF_WRITE bit isn't * changing. We've already set the cacheable bits based on * the assumption that we can write to this page. */ *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; PTE_SYNC(ptep); rv = 1; } else if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { /* * This looks like a good candidate for "page referenced" * emulation. */ struct pv_entry *pv; struct vm_page *pg; /* Extract the physical address of the page */ vm_page_lock_queues(); if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { vm_page_unlock_queues(); goto out; } /* Get the current flags for this page. */ pv = pmap_find_pv(pg, pm, va); if (pv == NULL) { vm_page_unlock_queues(); goto out; } pg->md.pvh_attrs |= PVF_REF; pv->pv_flags |= PVF_REF; *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; PTE_SYNC(ptep); rv = 1; vm_page_unlock_queues(); } /* * We know there is a valid mapping here, so simply * fix up the L1 if necessary. */ pl1pd = &pm->pm_l1->l1_kva[l1idx]; l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; if (*pl1pd != l1pd) { *pl1pd = l1pd; PTE_SYNC(pl1pd); rv = 1; } #ifdef CPU_SA110 /* * There are bugs in the rev K SA110. This is a check for one * of them. */ if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && curcpu()->ci_arm_cpurev < 3) { /* Always current pmap */ if (l2pte_valid(pte)) { extern int kernel_debug; if (kernel_debug & 1) { struct proc *p = curlwp->l_proc; printf("prefetch_abort: page is already " "mapped - pte=%p *pte=%08x\n", ptep, pte); printf("prefetch_abort: pc=%08lx proc=%p " "process=%s\n", va, p, p->p_comm); printf("prefetch_abort: far=%08x fs=%x\n", cpu_faultaddress(), cpu_faultstatus()); } #ifdef DDB if (kernel_debug & 2) Debugger(); #endif rv = 1; } } #endif /* CPU_SA110 */ #ifdef DEBUG /* * If 'rv == 0' at this point, it generally indicates that there is a * stale TLB entry for the faulting address. This happens when two or * more processes are sharing an L1. Since we don't flush the TLB on * a context switch between such processes, we can take domain faults * for mappings which exist at the same VA in both processes. EVEN IF * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for * example. * * This is extremely likely to happen if pmap_enter() updated the L1 * entry for a recently entered mapping. In this case, the TLB is * flushed for the new mapping, but there may still be TLB entries for * other mappings belonging to other processes in the 1MB range * covered by the L1 entry. * * Since 'rv == 0', we know that the L1 already contains the correct * value, so the fault must be due to a stale TLB entry. * * Since we always need to flush the TLB anyway in the case where we * fixed up the L1, or frobbed the L2 PTE, we effectively deal with * stale TLB entries dynamically. * * However, the above condition can ONLY happen if the current L1 is * being shared. If it happens when the L1 is unshared, it indicates * that other parts of the pmap are not doing their job WRT managing * the TLB. 
*/ if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { extern int last_fault_code; printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", pm, va, ftype); printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", l2, l2b, ptep, pl1pd); printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", pte, l1pd, last_fault_code); #ifdef DDB Debugger(); #endif } #endif cpu_tlb_flushID_SE(va); cpu_cpwait(); rv = 1; out: #if 0 pmap_release_pmap_lock(pm); PMAP_MAP_TO_HEAD_UNLOCK(); #endif return (rv); } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2() { int shpgperproc = PMAP_SHPGPERPROC; struct l2_bucket *l2b; struct l1_ttable *l1; pd_entry_t *pl1pt; pt_entry_t *ptep, pte; vm_offset_t va, eva; u_int loop, needed; TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); uma_prealloc(l2zone, 4096); l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); uma_prealloc(l2table_zone, 1024); uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); uma_zone_set_obj(l2zone, &l2zone_obj, pv_entry_max); needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); needed -= 1; l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); for (loop = 0; loop < needed; loop++, l1++) { /* Allocate a L1 page table */ va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0); if (va == 0) panic("Cannot allocate L1 KVM"); eva = va + L1_TABLE_SIZE; pl1pt = (pd_entry_t *)va; while (va < eva) { l2b = pmap_get_l2_bucket(pmap_kernel(), va); ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; *ptep = pte; PTE_SYNC(ptep); cpu_tlb_flushD_SE(va); va += PAGE_SIZE; } pmap_init_l1(l1, pl1pt); } #ifdef DEBUG printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", needed); #endif } /* * This is used to stuff certain critical values into the PCB where they * can be accessed quickly from cpu_switch() et al. */ void pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) { struct l2_bucket *l2b; pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | (DOMAIN_CLIENT << (pm->pm_domain * 2)); if (vector_page < KERNBASE) { pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; l2b = pmap_get_l2_bucket(pm, vector_page); pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); } else pcb->pcb_pl1vec = NULL; } void pmap_activate(struct thread *td) { pmap_t pm; struct pcb *pcb; int s; pm = vmspace_pmap(td->td_proc->p_vmspace); pcb = td->td_pcb; critical_enter(); pmap_set_pcb_pagedir(pm, pcb); if (td == curthread) { u_int cur_dacr, cur_ttb; __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); cur_ttb &= ~(L1_TABLE_SIZE - 1); if (cur_ttb == (u_int)pcb->pcb_pagedir && cur_dacr == pcb->pcb_dacr) { /* * No need to switch address spaces. 
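			 * The translation table base and the domain
			 * access control register already hold this
			 * pmap's values, so we can return without
			 * reloading them.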
*/ critical_exit(); return; } /* * We MUST, I repeat, MUST fix up the L1 entry corresponding * to 'vector_page' in the incoming L1 table before switching * to it otherwise subsequent interrupts/exceptions (including * domain faults!) will jump into hyperspace. */ if (pcb->pcb_pl1vec) { *pcb->pcb_pl1vec = pcb->pcb_l1vec; /* * Don't need to PTE_SYNC() at this point since * cpu_setttb() is about to flush both the cache * and the TLB. */ } cpu_domains(pcb->pcb_dacr); cpu_setttb(pcb->pcb_pagedir); splx(s); } critical_exit(); } static int pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) { pd_entry_t *pdep, pde; pt_entry_t *ptep, pte; vm_offset_t pa; int rv = 0; /* * Make sure the descriptor itself has the correct cache mode */ pdep = &kl1[L1_IDX(va)]; pde = *pdep; if (l1pte_section_p(pde)) { if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { *pdep = (pde & ~L1_S_CACHE_MASK) | pte_l1_s_cache_mode_pt; PTE_SYNC(pdep); cpu_dcache_wbinv_range((vm_offset_t)pdep, sizeof(*pdep)); rv = 1; } } else { pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); ptep = (pt_entry_t *)kernel_pt_lookup(pa); if (ptep == NULL) panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); ptep = &ptep[l2pte_index(va)]; pte = *ptep; if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; PTE_SYNC(ptep); cpu_dcache_wbinv_range((vm_offset_t)ptep, sizeof(*ptep)); rv = 1; } } return (rv); } static void pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, pt_entry_t **ptep) { vm_offset_t va = *availp; struct l2_bucket *l2b; if (ptep) { l2b = pmap_get_l2_bucket(pmap_kernel(), va); if (l2b == NULL) panic("pmap_alloc_specials: no l2b for 0x%x", va); *ptep = &l2b->l2b_kva[l2pte_index(va)]; } *vap = va; *availp = va + (PAGE_SIZE * pages); } /* * Bootstrap the system enough to run with virtual memory. * * On the arm this is called after mapping has already been enabled * and just syncs the pmap module with what has already been done. * [We can't call it easily with mapping off since the kernel is not * mapped with PA == VA, hence we would have to relocate every address * from the linked base (virtual) address "KERNBASE" to the actual * (physical) address starting relative to 0] */ #define PMAP_STATIC_L2_SIZE 16 #ifdef ARM_USE_SMALL_ALLOC extern struct mtx smallalloc_mtx; extern vm_offset_t alloc_firstaddr; #endif void pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt) { static struct l1_ttable static_l1; static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; struct l1_ttable *l1 = &static_l1; struct l2_dtable *l2; struct l2_bucket *l2b; pd_entry_t pde; pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; pt_entry_t *ptep; vm_paddr_t pa; vm_offset_t va; vm_size_t size; int l1idx, l2idx, l2next = 0; PDEBUG(1, printf("firstaddr = %08x, loadaddr = %08x\n", firstaddr, loadaddr)); virtual_avail = firstaddr; kernel_pmap = &kernel_pmap_store; kernel_pmap->pm_l1 = l1; /* * Scan the L1 translation table created by initarm() and create * the required metadata for all valid mappings found in it. */ for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { pde = kernel_l1pt[l1idx]; /* * We're only interested in Coarse mappings. * pmap_extract() can deal with section mappings without * recourse to checking L2 metadata. 
*/ if ((pde & L1_TYPE_MASK) != L1_TYPE_C) continue; /* * Lookup the KVA of this L2 descriptor table */ pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); ptep = (pt_entry_t *)kernel_pt_lookup(pa); if (ptep == NULL) { panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); } /* * Fetch the associated L2 metadata structure. * Allocate a new one if necessary. */ if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { if (l2next == PMAP_STATIC_L2_SIZE) panic("pmap_bootstrap: out of static L2s"); kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++]; } /* * One more L1 slot tracked... */ l2->l2_occupancy++; /* * Fill in the details of the L2 descriptor in the * appropriate bucket. */ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; l2b->l2b_kva = ptep; l2b->l2b_phys = pa; l2b->l2b_l1idx = l1idx; /* * Establish an initial occupancy count for this descriptor */ for (l2idx = 0; l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); l2idx++) { if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { l2b->l2b_occupancy++; } } /* * Make sure the descriptor itself has the correct cache mode. * If not, fix it, but whine about the problem. Port-meisters * should consider this a clue to fix up their initarm() * function. :) */ if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { printf("pmap_bootstrap: WARNING! wrong cache mode for " "L2 pte @ %p\n", ptep); } } /* * Ensure the primary (kernel) L1 has the correct cache mode for * a page table. Bitch if it is not correctly set. */ for (va = (vm_offset_t)kernel_l1pt; va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { if (pmap_set_pt_cache_mode(kernel_l1pt, va)) printf("pmap_bootstrap: WARNING! wrong cache mode for " "primary L1 @ 0x%x\n", va); } cpu_dcache_wbinv_all(); cpu_tlb_flushID(); cpu_cpwait(); kernel_pmap->pm_active = -1; kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; LIST_INIT(&allpmaps); TAILQ_INIT(&kernel_pmap->pm_pvlist); LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); /* * Reserve some special page table entries/VA space for temporary * mapping of pages. */ #define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE; pmap_alloc_specials(&virtual_avail, round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, &pmap_kernel_l2ptp_kva, NULL); size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; pmap_alloc_specials(&virtual_avail, round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, &pmap_kernel_l2dtable_kva, NULL); pmap_alloc_specials(&virtual_avail, 1, (vm_offset_t*)&_tmppt, NULL); SLIST_INIT(&l1_list); TAILQ_INIT(&l1_lru_list); mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); pmap_init_l1(l1, kernel_l1pt); cpu_dcache_wbinv_all(); virtual_avail = round_page(virtual_avail); virtual_end = lastaddr; kernel_vm_end = pmap_curmaxkvaddr; arm_nocache_startaddr = lastaddr; mtx_init(&cmtx, "TMP mappins mtx", NULL, MTX_DEF); #ifdef ARM_USE_SMALL_ALLOC mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF); arm_init_smallalloc(); #endif } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. 
* Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { struct pcb *pcb; pmap_idcache_wbinv_all(pmap); pmap_tlb_flushID(pmap); cpu_cpwait(); LIST_REMOVE(pmap, pm_list); if (vector_page < KERNBASE) { struct pcb *curpcb = PCPU_GET(curpcb); pcb = thread0.td_pcb; if (pmap_is_current(pmap)) { /* * Frob the L1 entry corresponding to the vector * page so that it contains the kernel pmap's domain * number. This will ensure pmap_remove() does not * pull the current vector page out from under us. */ critical_enter(); *pcb->pcb_pl1vec = pcb->pcb_l1vec; cpu_domains(pcb->pcb_dacr); cpu_setttb(pcb->pcb_pagedir); critical_exit(); } pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); /* * Make sure cpu_switch(), et al, DTRT. This is safe to do * since this process has no remaining mappings of its own. */ curpcb->pcb_pl1vec = pcb->pcb_pl1vec; curpcb->pcb_l1vec = pcb->pcb_l1vec; curpcb->pcb_dacr = pcb->pcb_dacr; curpcb->pcb_pagedir = pcb->pcb_pagedir; } pmap_free_l1(pmap); dprintf("pmap_release()\n"); } /* * Helper function for pmap_grow_l2_bucket() */ static __inline int pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) { struct l2_bucket *l2b; pt_entry_t *ptep; vm_paddr_t pa; struct vm_page *pg; pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (pg == NULL) return (1); pa = VM_PAGE_TO_PHYS(pg); if (pap) *pap = pa; l2b = pmap_get_l2_bucket(pmap_kernel(), va); ptep = &l2b->l2b_kva[l2pte_index(va)]; *ptep = L2_S_PROTO | pa | cache_mode | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); PTE_SYNC(ptep); return (0); } /* * This is the same as pmap_alloc_l2_bucket(), except that it is only * used by pmap_growkernel(). */ static __inline struct l2_bucket * pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) { struct l2_dtable *l2; struct l2_bucket *l2b; struct l1_ttable *l1; pd_entry_t *pl1pd; u_short l1idx; vm_offset_t nva; l1idx = L1_IDX(va); if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { /* * No mapping at this address, as there is * no entry in the L1 table. * Need to allocate a new l2_dtable. */ nva = pmap_kernel_l2dtable_kva; if ((nva & PAGE_MASK) == 0) { /* * Need to allocate a backing page */ if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) return (NULL); } l2 = (struct l2_dtable *)nva; nva += sizeof(struct l2_dtable); if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & PAGE_MASK)) { /* * The new l2_dtable straddles a page boundary. * Map in another page to cover it. */ if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) return (NULL); } pmap_kernel_l2dtable_kva = nva; /* * Link it into the parent pmap */ pm->pm_l2[L2_IDX(l1idx)] = l2; memset(l2, 0, sizeof(*l2)); } l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; /* * Fetch pointer to the L2 page table associated with the address. */ if (l2b->l2b_kva == NULL) { pt_entry_t *ptep; /* * No L2 page table has been allocated. Chances are, this * is because we just allocated the l2_dtable, above. 
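		 * The new page table is carved out of the kernel
		 * L2 PTP KVA window; a fresh backing page (mapped
		 * with the page-table cache mode) is allocated
		 * whenever the allocation pointer reaches a page
		 * boundary.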
*/ nva = pmap_kernel_l2ptp_kva; ptep = (pt_entry_t *)nva; if ((nva & PAGE_MASK) == 0) { /* * Need to allocate a backing page */ if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, &pmap_kernel_l2ptp_phys)) return (NULL); PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); } memset(ptep, 0, L2_TABLE_SIZE_REAL); l2->l2_occupancy++; l2b->l2b_kva = ptep; l2b->l2b_l1idx = l1idx; l2b->l2b_phys = pmap_kernel_l2ptp_phys; pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; } /* Distribute new L1 entry to all other L1s */ SLIST_FOREACH(l1, &l1_list, l1_link) { pl1pd = &l1->l1_kva[L1_IDX(va)]; *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; PTE_SYNC(pl1pd); } return (l2b); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { pmap_t kpm = pmap_kernel(); int s; if (addr <= pmap_curmaxkvaddr) return; /* we are OK */ /* * whoops! we need to add kernel PTPs */ s = splhigh(); /* to be safe */ /* Map 1MB at a time */ for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); /* * flush out the cache, expensive but growkernel will happen so * rarely */ cpu_dcache_wbinv_all(); cpu_tlb_flushD(); cpu_cpwait(); kernel_vm_end = pmap_curmaxkvaddr; } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { switch(prot) { case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: case VM_PROT_READ|VM_PROT_WRITE: return; case VM_PROT_READ: case VM_PROT_READ|VM_PROT_EXECUTE: pmap_clearbit(m, PVF_WRITE); break; default: pmap_remove_all(m); break; } } /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. */ void pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { struct pv_entry *pv, *npv; struct l2_bucket *l2b = NULL; vm_page_t m; pt_entry_t *pt; vm_page_lock_queues(); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { if (pv->pv_va >= eva || pv->pv_va < sva) { npv = TAILQ_NEXT(pv, pv_plist); continue; } if (pv->pv_flags & PVF_WIRED) { /* The page is wired, cannot remove it now. */ npv = TAILQ_NEXT(pv, pv_plist); continue; } pmap->pm_stats.resident_count--; l2b = pmap_get_l2_bucket(pmap, pv->pv_va); KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK); *pt = 0; PTE_SYNC(pt); npv = TAILQ_NEXT(pv, pv_plist); pmap_nuke_pv(m, pmap, pv); pmap_free_pv_entry(pv); pmap_free_l2_bucket(pmap, l2b, 1); } vm_page_unlock_queues(); cpu_idcache_wbinv_all(); cpu_tlb_flushID(); cpu_cpwait(); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* Map a section into the KVA. 
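 * The 1MB section mapping is written into every L1 table on the
 * l1_list so that all pmaps see the same kernel mapping.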
*/ void pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) { pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); struct l1_ttable *l1; KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("Not a valid section mapping")); if (flags & SECTION_CACHE) pd |= pte_l1_s_cache_mode; else if (flags & SECTION_PT) pd |= pte_l1_s_cache_mode_pt; SLIST_FOREACH(l1, &l1_list, l1_link) { l1->l1_kva[L1_IDX(va)] = pd; PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); } } /* * add a wired page to the kva * note that in order for the mapping to take effect -- you * should do a invltlb after doing the pmap_kenter... */ static PMAP_INLINE void pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) { struct l2_bucket *l2b; pt_entry_t *pte; pt_entry_t opte; PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", (uint32_t) va, (uint32_t) pa)); l2b = pmap_get_l2_bucket(pmap_kernel(), va); if (l2b == NULL) l2b = pmap_grow_l2_bucket(pmap_kernel(), va); KASSERT(l2b != NULL, ("No L2 Bucket")); pte = &l2b->l2b_kva[l2pte_index(va)]; opte = *pte; PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", (uint32_t) pte, opte, *pte)); if (l2pte_valid(opte)) { cpu_dcache_wbinv_range(va, PAGE_SIZE); cpu_tlb_flushD_SE(va); cpu_cpwait(); } else { if (opte == 0) l2b->l2b_occupancy++; } *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); if (flags & KENTER_CACHE) *pte |= pte_l2_s_cache_mode; if (flags & KENTER_USER) *pte |= L2_S_PROT_U; PTE_SYNC(pte); } void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { pmap_kenter_internal(va, pa, KENTER_CACHE); } void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) { pmap_kenter_internal(va, pa, 0); } void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) { pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); /* * Call pmap_fault_fixup now, to make sure we'll have no exception * at the first use of the new address, or bad things will happen, * as we use one of these addresses in the exception handlers. */ pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1); } /* * remove a page rom the kernel pagetables */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { struct l2_bucket *l2b; pt_entry_t *pte, opte; l2b = pmap_get_l2_bucket(pmap_kernel(), va); if (!l2b) return; KASSERT(l2b != NULL, ("No L2 Bucket")); pte = &l2b->l2b_kva[l2pte_index(va)]; opte = *pte; if (l2pte_valid(opte)) { cpu_dcache_wbinv_range(va, PAGE_SIZE); cpu_tlb_flushD_SE(va); cpu_cpwait(); *pte = 0; } } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. 
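 *
 * With ARM_USE_SMALL_ALLOC the range is served from the direct
 * map; otherwise it is entered one page at a time with
 * pmap_kenter() and '*virt' is advanced past the mapping.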
*/ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { #ifdef ARM_USE_SMALL_ALLOC return (arm_ptovirt(start)); #else vm_offset_t sva = *virt; vm_offset_t va = sva; PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, prot)); while (start < end) { pmap_kenter(va, start); va += PAGE_SIZE; start += PAGE_SIZE; } *virt = va; return (sva); #endif } static void pmap_wb_page(vm_page_t m, boolean_t do_inv) { struct pv_entry *pv; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, do_inv, (pv->pv_flags & PVF_WRITE) == 0); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; for (i = 0; i < count; i++) { pmap_wb_page(m[i], TRUE); pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), KENTER_CACHE); va += PAGE_SIZE; } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t va, int count) { vm_paddr_t pa; int i; for (i = 0; i < count; i++) { pa = vtophys(va); if (pa) { pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE); pmap_kremove(va); } va += PAGE_SIZE; } } /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte; if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) return (FALSE); KASSERT(pte != NULL, ("Valid mapping but no pte ?")); if (*pte == 0) return (TRUE); return (FALSE); } /* * Fetch pointers to the PDE/PTE for the given pmap/VA pair. * Returns TRUE if the mapping exists, else FALSE. * * NOTE: This function is only used by a couple of arm-specific modules. * It is not safe to take any pmap locks here, since we could be right * in the middle of debugging the pmap anyway... * * It is possible for this routine to return FALSE even though a valid * mapping does exist. This is because we don't lock, so the metadata * state may be inconsistent. * * NOTE: We can return a NULL *ptp in the case where the L1 pde is * a "section" mapping. */ boolean_t pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) { struct l2_dtable *l2; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep; u_short l1idx; if (pm->pm_l1 == NULL) return (FALSE); l1idx = L1_IDX(va); *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; l1pd = *pl1pd; if (l1pte_section_p(l1pd)) { *ptp = NULL; return (TRUE); } if (pm->pm_l2 == NULL) return (FALSE); l2 = pm->pm_l2[L2_IDX(l1idx)]; if (l2 == NULL || (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { return (FALSE); } *ptp = &ptep[l2pte_index(va)]; return (TRUE); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. 
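 * The TLB is flushed once at the end, and only if one of the
 * removed mappings belonged to the current pmap or the kernel
 * pmap.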
* * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { pv_entry_t pv; pt_entry_t *ptep, pte; struct l2_bucket *l2b; boolean_t flush = FALSE; pmap_t curpm; int flags = 0; #if defined(PMAP_DEBUG) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ if (m->flags & PG_FICTITIOUS) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m)); } #endif if (TAILQ_EMPTY(&m->md.pv_list)) return; curpm = vmspace_pmap(curproc->p_vmspace); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { if (flush == FALSE && (pv->pv_pmap == curpm || pv->pv_pmap == pmap_kernel())) flush = TRUE; l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); KASSERT(l2b != NULL, ("No l2 bucket")); ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; pte = *ptep; *ptep = 0; PTE_SYNC_CURRENT(pv->pv_pmap, ptep); pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); if (pv->pv_flags & PVF_WIRED) pv->pv_pmap->pm_stats.wired_count--; pv->pv_pmap->pm_stats.resident_count--; flags |= pv->pv_flags; pmap_nuke_pv(m, pv->pv_pmap, pv); pmap_free_pv_entry(pv); } if (flush) { if (PV_BEEN_EXECD(flags)) pmap_tlb_flushID(curpm); else pmap_tlb_flushD(curpm); } } /* * Set the physical protection on the * specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct l2_bucket *l2b; pt_entry_t *ptep, pte; vm_offset_t next_bucket; u_int flags; int flush; if ((prot & VM_PROT_READ) == 0) { mtx_lock(&Giant); pmap_remove(pm, sva, eva); mtx_unlock(&Giant); return; } if (prot & VM_PROT_WRITE) { /* * If this is a read->write transition, just ignore it and let * vm_fault() take care of it later. */ return; } mtx_lock(&Giant); /* * OK, at this point, we know we're doing write-protect operation. * If the pmap is active, write-back the range. */ pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; flags = 0; vm_page_lock_queues(); while (sva < eva) { next_bucket = L2_NEXT_BUCKET(sva); if (next_bucket > eva) next_bucket = eva; l2b = pmap_get_l2_bucket(pm, sva); if (l2b == NULL) { sva = next_bucket; continue; } ptep = &l2b->l2b_kva[l2pte_index(sva)]; while (sva < next_bucket) { if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { struct vm_page *pg; u_int f; pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); pte &= ~L2_S_PROT_W; *ptep = pte; PTE_SYNC(ptep); if (pg != NULL) { f = pmap_modify_pv(pg, pm, sva, PVF_WRITE, 0); pmap_vac_me_harder(pg, pm, sva); vm_page_dirty(pg); } else f = PVF_REF | PVF_EXEC; if (flush >= 0) { flush++; flags |= f; } else if (PV_BEEN_EXECD(f)) pmap_tlb_flushID_SE(pm, sva); else if (PV_BEEN_REFD(f)) pmap_tlb_flushD_SE(pm, sva); } sva += PAGE_SIZE; ptep++; } } if (flush) { if (PV_BEEN_EXECD(flags)) pmap_tlb_flushID(pm); else if (PV_BEEN_REFD(flags)) pmap_tlb_flushD(pm); } vm_page_unlock_queues(); mtx_unlock(&Giant); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. 
*/ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_page_lock_queues(); pmap_enter_locked(pmap, va, m, prot, wired); vm_page_unlock_queues(); } /* * The page queues and pmap must be locked. */ static void pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct l2_bucket *l2b = NULL; struct vm_page *opg; struct pv_entry *pve = NULL; pt_entry_t *ptep, npte, opte; u_int nflags; u_int oflags; vm_paddr_t pa; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (va == vector_page) { pa = systempage.pv_pa; m = NULL; } else pa = VM_PAGE_TO_PHYS(m); nflags = 0; if (prot & VM_PROT_WRITE) nflags |= PVF_WRITE; if (prot & VM_PROT_EXECUTE) nflags |= PVF_EXEC; if (wired) nflags |= PVF_WIRED; PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired)); if (pmap == pmap_kernel()) { l2b = pmap_get_l2_bucket(pmap, va); if (l2b == NULL) l2b = pmap_grow_l2_bucket(pmap, va); } else l2b = pmap_alloc_l2_bucket(pmap, va); KASSERT(l2b != NULL, ("pmap_enter: failed to allocate l2 bucket")); ptep = &l2b->l2b_kva[l2pte_index(va)]; opte = *ptep; npte = pa; oflags = 0; if (opte) { /* * There is already a mapping at this address. * If the physical address is different, lookup the * vm_page. */ if (l2pte_pa(opte) != pa) opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); else opg = m; } else opg = NULL; if ((prot & (VM_PROT_ALL)) || (!m || m->md.pvh_attrs & PVF_REF)) { /* * - The access type indicates that we don't need * to do referenced emulation. * OR * - The physical page has already been referenced * so no need to re-do referenced emulation here. */ npte |= L2_S_PROTO; nflags |= PVF_REF; if (m && ((prot & VM_PROT_WRITE) != 0 || (m->md.pvh_attrs & PVF_MOD))) { /* * This is a writable mapping, and the * page's mod state indicates it has * already been modified. Make it * writable from the outset. */ nflags |= PVF_MOD; if (!(m->md.pvh_attrs & PVF_MOD)) vm_page_dirty(m); } if (m && opte) vm_page_flag_set(m, PG_REFERENCED); } else { /* * Need to do page referenced emulation. */ npte |= L2_TYPE_INV; } if (prot & VM_PROT_WRITE) npte |= L2_S_PROT_W; npte |= pte_l2_s_cache_mode; if (m && m == opg) { /* * We're changing the attrs of an existing mapping. */ #if 0 simple_lock(&pg->mdpage.pvh_slock); #endif oflags = pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_EXEC | PVF_WIRED | PVF_MOD | PVF_REF, nflags); #if 0 simple_unlock(&pg->mdpage.pvh_slock); #endif /* * We may need to flush the cache if we're * doing rw-ro... */ if (pmap_is_current(pmap) && (oflags & PVF_NC) == 0 && (opte & L2_S_PROT_W) != 0 && (prot & VM_PROT_WRITE) == 0) cpu_dcache_wb_range(va, PAGE_SIZE); } else { /* * New mapping, or changing the backing page * of an existing mapping. */ if (opg) { /* * Replacing an existing mapping with a new one. * It is part of our managed memory so we * must remove it from the PV list */ #if 0 simple_lock(&opg->mdpage.pvh_slock); #endif pve = pmap_remove_pv(opg, pmap, va); if (m && (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) && pve) pmap_free_pv_entry(pve); else if (!pve && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) pve = pmap_get_pv_entry(); KASSERT(pve != NULL, ("No pv")); #if 0 simple_unlock(&opg->mdpage.pvh_slock); #endif oflags = pve->pv_flags; /* * If the old mapping was valid (ref/mod * emulation creates 'invalid' mappings * initially) then make sure to frob * the cache. 
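				 * An executable mapping needs both the
				 * I- and D-cache written back and
				 * invalidated; a data-only mapping just
				 * needs the D-cache dealt with.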
*/ if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { if (PV_BEEN_EXECD(oflags)) { pmap_idcache_wbinv_range(pmap, va, PAGE_SIZE); } else if (PV_BEEN_REFD(oflags)) { pmap_dcache_wb_range(pmap, va, PAGE_SIZE, TRUE, (oflags & PVF_WRITE) == 0); } } } else if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) if ((pve = pmap_get_pv_entry()) == NULL) { panic("pmap_enter: no pv entries"); } if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_enter_pv(m, pve, pmap, va, nflags); } } /* * Make sure userland mappings get the right permissions */ if (pmap != pmap_kernel() && va != vector_page) { npte |= L2_S_PROT_U; } /* * Keep the stats up to date */ if (opte == 0) { l2b->l2b_occupancy++; pmap->pm_stats.resident_count++; } /* * If this is just a wiring change, the two PTEs will be * identical, so there's no need to update the page table. */ if (npte != opte) { boolean_t is_cached = pmap_is_current(pmap); *ptep = npte; if (is_cached) { /* * We only need to frob the cache/tlb if this pmap * is current */ PTE_SYNC(ptep); if (L1_IDX(va) != L1_IDX(vector_page) && l2pte_valid(npte)) { /* * This mapping is likely to be accessed as * soon as we return to userland. Fix up the * L1 entry to avoid taking another * page/domain fault. */ pd_entry_t *pl1pd, l1pd; pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO; if (*pl1pd != l1pd) { *pl1pd = l1pd; PTE_SYNC(pl1pd); } } } if (PV_BEEN_EXECD(oflags)) pmap_tlb_flushID_SE(pmap, va); else if (PV_BEEN_REFD(oflags)) pmap_tlb_flushD_SE(pmap, va); if (m) pmap_vac_me_harder(m, pmap, va); } } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m; vm_pindex_t diff, psize; psize = atop(end - start); m = m_start; while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { pmap_enter_locked(pmap, start + ptoa(diff), m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); m = TAILQ_NEXT(m, listq); } } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ -vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); - return (NULL); } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. 
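 *
 * Only the PV entry's wired flag (and, through pmap_modify_pv(),
 * the pmap's wired-page count) is changed; the PTE itself is left
 * untouched.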
*/ void pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) { struct l2_bucket *l2b; pt_entry_t *ptep, pte; vm_page_t pg; l2b = pmap_get_l2_bucket(pmap, va); KASSERT(l2b, ("No l2b bucket in pmap_change_wiring")); ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); if (pg) pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pm, vm_offset_t va) { struct l2_dtable *l2; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep, pte; vm_paddr_t pa; u_int l1idx; l1idx = L1_IDX(va); pl1pd = &pm->pm_l1->l1_kva[l1idx]; l1pd = *pl1pd; if (l1pte_section_p(l1pd)) { /* * These should only happen for pmap_kernel() */ KASSERT(pm == pmap_kernel(), ("huh")); pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); } else { /* * Note that we can't rely on the validity of the L1 * descriptor as an indication that a mapping exists. * We have to look it up in the L2 dtable. */ l2 = pm->pm_l2[L2_IDX(l1idx)]; if (l2 == NULL || (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { return (0); } ptep = &ptep[l2pte_index(va)]; pte = *ptep; if (pte == 0) return (0); switch (pte & L2_TYPE_MASK) { case L2_TYPE_L: pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); break; default: pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); break; } } return (pa); } /* * Atomically extract and hold the physical page with the given * pmap and virtual address pair if that mapping permits the given * protection. * */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { struct l2_dtable *l2; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep, pte; vm_paddr_t pa; vm_page_t m = NULL; u_int l1idx; l1idx = L1_IDX(va); pl1pd = &pmap->pm_l1->l1_kva[l1idx]; l1pd = *pl1pd; vm_page_lock_queues(); if (l1pte_section_p(l1pd)) { /* * These should only happen for pmap_kernel() */ KASSERT(pmap == pmap_kernel(), ("huh")); pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { m = PHYS_TO_VM_PAGE(pa); vm_page_hold(m); } } else { /* * Note that we can't rely on the validity of the L1 * descriptor as an indication that a mapping exists. * We have to look it up in the L2 dtable. */ l2 = pmap->pm_l2[L2_IDX(l1idx)]; if (l2 == NULL || (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { return (NULL); } ptep = &ptep[l2pte_index(va)]; pte = *ptep; if (pte == 0) return (NULL); if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { switch (pte & L2_TYPE_MASK) { case L2_TYPE_L: pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); break; default: pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); break; } m = PHYS_TO_VM_PAGE(pa); vm_page_hold(m); } } vm_page_unlock_queues(); return (m); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. 
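 *
 * When the vector page lies below KERNBASE it is entered into the
 * new pmap right away so that the CPU vectors remain reachable once
 * this pmap is activated.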
*/ void pmap_pinit(pmap_t pmap) { PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); pmap_alloc_l1(pmap); bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); pmap->pm_count = 1; pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); pmap->pm_stats.resident_count = 1; if (vector_page < KERNBASE) { pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ, 1); } } /*************************************************** * page management routines. ***************************************************/ static void pmap_free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. */ static pv_entry_t pmap_get_pv_entry(void) { pv_entry_t ret_value; pv_entry_count++; if (pv_entry_high_water && (pv_entry_count > pv_entry_high_water) && (pmap_pagedaemon_waken == 0)) { pmap_pagedaemon_waken = 1; wakeup (&vm_pages_needed); } ret_value = uma_zalloc(pvzone, M_NOWAIT); return ret_value; } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 void pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct l2_bucket *l2b; vm_offset_t next_bucket; pt_entry_t *ptep; u_int cleanlist_idx, total, cnt; struct { vm_offset_t va; pt_entry_t *pte; } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; u_int mappings, is_exec, is_refd; int flushall = 0; /* * we lock in the pmap => pv_head direction */ #if 0 PMAP_MAP_TO_HEAD_LOCK(); pmap_acquire_pmap_lock(pm); #endif vm_page_lock_queues(); if (!pmap_is_current(pm)) { cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; } else cleanlist_idx = 0; total = 0; while (sva < eva) { /* * Do one L2 bucket's worth at a time. */ next_bucket = L2_NEXT_BUCKET(sva); if (next_bucket > eva) next_bucket = eva; l2b = pmap_get_l2_bucket(pm, sva); if (l2b == NULL) { sva = next_bucket; continue; } ptep = &l2b->l2b_kva[l2pte_index(sva)]; mappings = 0; while (sva < next_bucket) { struct vm_page *pg; pt_entry_t pte; vm_paddr_t pa; pte = *ptep; if (pte == 0) { /* * Nothing here, move along */ sva += PAGE_SIZE; ptep++; continue; } pm->pm_stats.resident_count--; pa = l2pte_pa(pte); is_exec = 0; is_refd = 1; /* * Update flags. In a number of circumstances, * we could cluster a lot of these and do a * number of sequential pages in one go. */ if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { struct pv_entry *pve; #if 0 simple_lock(&pg->mdpage.pvh_slock); #endif pve = pmap_remove_pv(pg, pm, sva); if (pve) { #if 0 simple_unlock(&pg->mdpage.pvh_slock); #endif is_exec = PV_BEEN_EXECD(pve->pv_flags); is_refd = PV_BEEN_REFD(pve->pv_flags); pmap_free_pv_entry(pve); } } if (!l2pte_valid(pte)) { *ptep = 0; PTE_SYNC_CURRENT(pm, ptep); sva += PAGE_SIZE; ptep++; mappings++; continue; } if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { /* Add to the clean list. */ cleanlist[cleanlist_idx].pte = ptep; cleanlist[cleanlist_idx].va = sva | (is_exec & 1); cleanlist_idx++; } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { /* Nuke everything if needed. */ pmap_idcache_wbinv_all(pm); pmap_tlb_flushID(pm); /* * Roll back the previous PTE list, * and zero out the current PTE. 
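				 * The wholesale cache write-back and TLB
				 * flush just performed make the per-page
				 * operations recorded in the clean list
				 * unnecessary.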
*/ for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { *cleanlist[cnt].pte = 0; } *ptep = 0; PTE_SYNC(ptep); cleanlist_idx++; flushall = 1; } else { *ptep = 0; PTE_SYNC(ptep); if (is_exec) pmap_tlb_flushID_SE(pm, sva); else if (is_refd) pmap_tlb_flushD_SE(pm, sva); } sva += PAGE_SIZE; ptep++; mappings++; } /* * Deal with any left overs */ if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { total += cleanlist_idx; for (cnt = 0; cnt < cleanlist_idx; cnt++) { vm_offset_t clva = cleanlist[cnt].va & ~1; if (cleanlist[cnt].va & 1) { pmap_idcache_wbinv_range(pm, clva, PAGE_SIZE); pmap_tlb_flushID_SE(pm, clva); } else { pmap_dcache_wb_range(pm, clva, PAGE_SIZE, TRUE, FALSE); pmap_tlb_flushD_SE(pm, clva); } *cleanlist[cnt].pte = 0; PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte); } if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) cleanlist_idx = 0; else { /* * We are removing so much entries it's just * easier to flush the whole cache. */ cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; pmap_idcache_wbinv_all(pm); flushall = 1; } } pmap_free_l2_bucket(pm, l2b, mappings); } vm_page_unlock_queues(); if (flushall) cpu_tlb_flushID(); #if 0 pmap_release_pmap_lock(pm); PMAP_MAP_TO_HEAD_UNLOCK(); #endif } /* * pmap_zero_page() * * Zero a given physical page by mapping it at a page hook point. * In doing the zero page op, the page we zero is mapped cachable, as with * StrongARM accesses to non-cached pages are non-burst making writing * _any_ bulk data very slow. */ #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 void pmap_zero_page_generic(vm_paddr_t phys, int off, int size) { #ifdef ARM_USE_SMALL_ALLOC char *dstpg; #endif #ifdef DEBUG struct vm_page *pg = PHYS_TO_VM_PAGE(phys); if (pg->md.pvh_list != NULL) panic("pmap_zero_page: page has mappings"); #endif #ifdef ARM_USE_SMALL_ALLOC dstpg = (char *)arm_ptovirt(phys); if (off || size != PAGE_SIZE) { bzero(dstpg + off, size); cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size); } else { bzero_page((vm_offset_t)dstpg); cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE); } #else mtx_lock(&cmtx); /* * Hook in the page, zero it, and purge the cache for that * zeroed page. Invalidate the TLB as needed. */ *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); if (off || size != PAGE_SIZE) bzero((void *)(cdstp + off), size); else bzero_page(cdstp); mtx_unlock(&cmtx); cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); #endif } #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ #if ARM_MMU_XSCALE == 1 void pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) { mtx_lock(&cmtx); /* * Hook in the page, zero it, and purge the cache for that * zeroed page. Invalidate the TLB as needed. */ *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); if (off || size != PAGE_SIZE) bzero((void *)(cdstp + off), size); else bzero_page(cdstp); mtx_unlock(&cmtx); xscale_cache_clean_minidata(); } /* * Change the PTEs for the specified kernel mappings such that they * will use the mini data cache instead of the main data cache. 
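 * Each PTE has its L2_B (bufferable) bit cleared, after the page is
 * first written back out of the main D-cache and its TLB entry is
 * flushed.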
*/ void pmap_use_minicache(vm_offset_t va, vm_size_t size) { struct l2_bucket *l2b; pt_entry_t *ptep, *sptep, pte; vm_offset_t next_bucket, eva; #if (ARM_NMMUS > 1) if (xscale_use_minidata == 0) return; #endif eva = va + size; while (va < eva) { next_bucket = L2_NEXT_BUCKET(va); if (next_bucket > eva) next_bucket = eva; l2b = pmap_get_l2_bucket(pmap_kernel(), va); sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; while (va < next_bucket) { pte = *ptep; if (!l2pte_minidata(pte)) { cpu_dcache_wbinv_range(va, PAGE_SIZE); cpu_tlb_flushD_SE(va); *ptep = pte & ~L2_B; } ptep++; va += PAGE_SIZE; } PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); } cpu_cpwait(); } #endif /* ARM_MMU_XSCALE == 1 */ /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. This * is intended to be called from the vm_pagezero process only and * outside of Giant. */ void pmap_zero_page_idle(vm_page_t m) { pmap_zero_page(m); } /* * pmap_clean_page() * * This is a local function used to work out the best strategy to clean * a single page referenced by its entry in the PV table. It's used by * pmap_copy_page, pmap_zero page and maybe some others later on. * * Its policy is effectively: * o If there are no mappings, we don't bother doing anything with the cache. * o If there is one mapping, we clean just that page. * o If there are multiple mappings, we clean the entire cache. * * So that some functions can be further optimised, it returns 0 if it didn't * clean the entire cache, or 1 if it did. * * XXX One bug in this routine is that if the pv_entry has a single page * mapped at 0x00000000 a whole cache clean will be performed rather than * just the 1 page. Since this should not occur in everyday use and if it does * it will just result in not the most efficient clean for the page. */ static int pmap_clean_page(struct pv_entry *pv, boolean_t is_src) { pmap_t pm, pm_to_clean = NULL; struct pv_entry *npv; u_int cache_needs_cleaning = 0; u_int flags = 0; vm_offset_t page_to_clean = 0; if (pv == NULL) { /* nothing mapped in so nothing to flush */ return (0); } /* * Since we flush the cache each time we change to a different * user vmspace, we only need to flush the page if it is in the * current pmap. */ if (curthread) pm = vmspace_pmap(curproc->p_vmspace); else pm = pmap_kernel(); for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { flags |= npv->pv_flags; /* * The page is mapped non-cacheable in * this map. No need to flush the cache. 
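 * (The DIAGNOSTIC check that follows catches the inconsistent case in
 * which a clean was already scheduled for an earlier, cacheable
 * mapping of the same page.)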
*/ if (npv->pv_flags & PVF_NC) { #ifdef DIAGNOSTIC if (cache_needs_cleaning) panic("pmap_clean_page: " "cache inconsistency"); #endif break; } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) continue; if (cache_needs_cleaning) { page_to_clean = 0; break; } else { page_to_clean = npv->pv_va; pm_to_clean = npv->pv_pmap; } cache_needs_cleaning = 1; } } if (page_to_clean) { if (PV_BEEN_EXECD(flags)) pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, PAGE_SIZE); else pmap_dcache_wb_range(pm_to_clean, page_to_clean, PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); } else if (cache_needs_cleaning) { if (PV_BEEN_EXECD(flags)) pmap_idcache_wbinv_all(pm); else pmap_dcache_wbinv_all(pm); return (1); } return (0); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ /* * pmap_copy_page() * * Copy one physical page into another, by mapping the pages into * hook points. The same comment regarding cachability as in * pmap_zero_page also applies here. */ #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 void pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) { struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); #ifdef DEBUG struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); if (dst_pg->md.pvh_list != NULL) panic("pmap_copy_page: dst page has mappings"); #endif /* * Clean the source page. Hold the source page's lock for * the duration of the copy so that no other mappings can * be created while we have a potentially aliased mapping. */ #if 0 mtx_lock(&src_pg->md.pvh_mtx); #endif (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); /* * Map the pages into the page hook points, copy them, and purge * the cache for the appropriate page. Invalidate the TLB * as required. */ mtx_lock(&cmtx); *csrc_pte = L2_S_PROTO | src | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; PTE_SYNC(csrc_pte); *cdst_pte = L2_S_PROTO | dst | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(csrcp); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); bcopy_page(csrcp, cdstp); mtx_unlock(&cmtx); cpu_dcache_inv_range(csrcp, PAGE_SIZE); #if 0 mtx_lock(&src_pg->md.pvh_mtx); #endif cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); } #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ #if ARM_MMU_XSCALE == 1 void pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) { struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); #ifdef DEBUG struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); if (dst_pg->md.pvh_list != NULL) panic("pmap_copy_page: dst page has mappings"); #endif /* * Clean the source page. Hold the source page's lock for * the duration of the copy so that no other mappings can * be created while we have a potentially aliased mapping. */ (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); /* * Map the pages into the page hook points, copy them, and purge * the cache for the appropriate page. Invalidate the TLB * as required. 
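 *
 * On XScale the hook mappings set up below use the mini-data cache
 * attribute, so once the copy has finished only the mini-data cache
 * has to be cleaned, via xscale_cache_clean_minidata().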
*/ mtx_lock(&cmtx); *csrc_pte = L2_S_PROTO | src | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ PTE_SYNC(csrc_pte); *cdst_pte = L2_S_PROTO | dst | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(csrcp); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); bcopy_page(csrcp, cdstp); mtx_unlock(&cmtx); xscale_cache_clean_minidata(); } #endif /* ARM_MMU_XSCALE == 1 */ void pmap_copy_page(vm_page_t src, vm_page_t dst) { #ifdef ARM_USE_SMALL_ALLOC vm_offset_t srcpg, dstpg; #endif cpu_dcache_wbinv_all(); #ifdef ARM_USE_SMALL_ALLOC srcpg = arm_ptovirt(VM_PAGE_TO_PHYS(src)); dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst)); bcopy_page(srcpg, dstpg); cpu_dcache_wbinv_range(dstpg, PAGE_SIZE); #else pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); #endif } /* * this routine returns true if a physical page resides * in the given pmap. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops = 0; int s; if (m->flags & PG_FICTITIOUS) return (FALSE); s = splvm(); /* * Not found, check current mappings returning immediately */ for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { if (pv->pv_pmap == pmap) { splx(s); return (TRUE); } loops++; if (loops >= 16) break; } splx(s); return (FALSE); } /* * pmap_ts_referenced: * * Return the count of reference bits for a page, clearing all of them. */ int pmap_ts_referenced(vm_page_t m) { return (pmap_clearbit(m, PVF_REF)); } boolean_t pmap_is_modified(vm_page_t m) { if (m->md.pvh_attrs & PVF_MOD) return (TRUE); return(FALSE); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { if (m->md.pvh_attrs & PVF_MOD) pmap_clearbit(m, PVF_MOD); } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { if (m->md.pvh_attrs & PVF_REF) pmap_clearbit(m, PVF_REF); } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr) { printf("pmap_mincore()\n"); return (0); } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { return(addr); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, tmpva, offset; offset = pa & PAGE_MASK; size = roundup(size, PAGE_SIZE); GIANT_REQUIRED; va = kmem_alloc_nofault(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { pmap_kenter_internal(tmpva, pa, 0); size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } return ((void *)(va + offset)); } #define BOOTSTRAP_DEBUG /* * pmap_map_section: * * Create a single section mapping. 
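 *
 * A section mapping covers a whole L1_S_SIZE (1MB) region with a
 * single L1 entry, so both "va" and "pa" must be section-aligned; the
 * KASSERT below checks this before the L1 slot is written.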
*/ void pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, int cache) { pd_entry_t *pde = (pd_entry_t *) l1pt; pd_entry_t fl; KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2")); switch (cache) { case PTE_NOCACHE: default: fl = 0; break; case PTE_CACHE: fl = pte_l1_s_cache_mode; break; case PTE_PAGETABLE: fl = pte_l1_s_cache_mode_pt; break; } pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); PTE_SYNC(&pde[va >> L1_S_SHIFT]); } /* * pmap_link_l2pt: * * Link the L2 page table specified by "pa" into the L1 * page table at the slot for "va". */ void pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv) { pd_entry_t *pde = (pd_entry_t *) l1pt, proto; u_int slot = va >> L1_S_SHIFT; #ifndef ARM32_NEW_VM_LAYOUT KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0, ("blah")); KASSERT((l2pv->pv_pa & PAGE_MASK) == 0, ("ouin")); #endif proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); #ifdef ARM32_NEW_VM_LAYOUT PTE_SYNC(&pde[slot]); #else pde[slot + 1] = proto | (l2pv->pv_pa + 0x400); pde[slot + 2] = proto | (l2pv->pv_pa + 0x800); pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00); PTE_SYNC_RANGE(&pde[slot + 0], 4); #endif SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); } /* * pmap_map_entry * * Create a single page mapping. */ void pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, int cache) { pd_entry_t *pde = (pd_entry_t *) l1pt; pt_entry_t fl; pt_entry_t *pte; KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin")); switch (cache) { case PTE_NOCACHE: default: fl = 0; break; case PTE_CACHE: fl = pte_l2_s_cache_mode; break; case PTE_PAGETABLE: fl = pte_l2_s_cache_mode_pt; break; } if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) panic("pmap_map_entry: no L2 table for VA 0x%08x", va); #ifndef ARM32_NEW_VM_LAYOUT pte = (pt_entry_t *) kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); #else pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); #endif if (pte == NULL) panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va); #ifndef ARM32_NEW_VM_LAYOUT pte[(va >> PAGE_SHIFT) & 0x3ff] = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]); #else pte[l2pte_index(va)] = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; PTE_SYNC(&pte[l2pte_index(va)]); #endif } /* * pmap_map_chunk: * * Map a chunk of memory using the most efficient mappings * possible (section. large page, small page) into the * provided L1 and L2 tables at the specified virtual address. */ vm_size_t pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, vm_size_t size, int prot, int cache) { pd_entry_t *pde = (pd_entry_t *) l1pt; pt_entry_t *pte, f1, f2s, f2l; vm_size_t resid; int i; resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); if (l1pt == 0) panic("pmap_map_chunk: no L1 table provided"); #ifdef VERBOSE_INIT_ARM printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx " "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); #endif switch (cache) { case PTE_NOCACHE: default: f1 = 0; f2l = 0; f2s = 0; break; case PTE_CACHE: f1 = pte_l1_s_cache_mode; f2l = pte_l2_l_cache_mode; f2s = pte_l2_s_cache_mode; break; case PTE_PAGETABLE: f1 = pte_l1_s_cache_mode_pt; f2l = pte_l2_l_cache_mode_pt; f2s = pte_l2_s_cache_mode_pt; break; } size = resid; while (resid > 0) { /* See if we can use a section mapping. 
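 *
 * A section can be used when va, pa and the remaining size are all
 * section-aligned (1MB) and large enough, as tested by
 * L1_S_MAPPABLE_P below; otherwise the loop falls back to 64KB large
 * pages and, failing that, to 4KB small pages.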
*/ if (L1_S_MAPPABLE_P(va, pa, resid)) { #ifdef VERBOSE_INIT_ARM printf("S"); #endif pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, prot) | f1 | L1_S_DOM(PMAP_DOMAIN_KERNEL); PTE_SYNC(&pde[va >> L1_S_SHIFT]); va += L1_S_SIZE; pa += L1_S_SIZE; resid -= L1_S_SIZE; continue; } /* * Ok, we're going to use an L2 table. Make sure * one is actually in the corresponding L1 slot * for the current VA. */ if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) panic("pmap_map_chunk: no L2 table for VA 0x%08x", va); #ifndef ARM32_NEW_VM_LAYOUT pte = (pt_entry_t *) kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); #else pte = (pt_entry_t *) kernel_pt_lookup( pde[L1_IDX(va)] & L1_C_ADDR_MASK); #endif if (pte == NULL) panic("pmap_map_chunk: can't find L2 table for VA" "0x%08x", va); /* See if we can use a L2 large page mapping. */ if (L2_L_MAPPABLE_P(va, pa, resid)) { #ifdef VERBOSE_INIT_ARM printf("L"); #endif for (i = 0; i < 16; i++) { #ifndef ARM32_NEW_VM_LAYOUT pte[((va >> PAGE_SHIFT) & 0x3f0) + i] = L2_L_PROTO | pa | L2_L_PROT(PTE_KERNEL, prot) | f2l; PTE_SYNC(&pte[((va >> PAGE_SHIFT) & 0x3f0) + i]); #else pte[l2pte_index(va) + i] = L2_L_PROTO | pa | L2_L_PROT(PTE_KERNEL, prot) | f2l; PTE_SYNC(&pte[l2pte_index(va) + i]); #endif } va += L2_L_SIZE; pa += L2_L_SIZE; resid -= L2_L_SIZE; continue; } /* Use a small page mapping. */ #ifdef VERBOSE_INIT_ARM printf("P"); #endif #ifndef ARM32_NEW_VM_LAYOUT pte[(va >> PAGE_SHIFT) & 0x3ff] = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]); #else pte[l2pte_index(va)] = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; PTE_SYNC(&pte[l2pte_index(va)]); #endif va += PAGE_SIZE; pa += PAGE_SIZE; resid -= PAGE_SIZE; } #ifdef VERBOSE_INIT_ARM printf("\n"); #endif return (size); } /********************** Static device map routines ***************************/ static const struct pmap_devmap *pmap_devmap_table; /* * Register the devmap table. This is provided in case early console * initialization needs to register mappings created by bootstrap code * before pmap_devmap_bootstrap() is called. */ void pmap_devmap_register(const struct pmap_devmap *table) { pmap_devmap_table = table; } /* * Map all of the static regions in the devmap table, and remember * the devmap table so other parts of the kernel can look up entries * later. 
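 *
 * A port's early startup code (typically initarm()) registers a table
 * along the lines of the hypothetical example below; the addresses,
 * sizes and the table name are illustrative only. The list is
 * terminated by an entry whose pd_size is zero:
 *
 *	static const struct pmap_devmap example_devmap[] = {
 *		{
 *			.pd_va    = 0xfe000000,
 *			.pd_pa    = 0x80000000,
 *			.pd_size  = 0x00100000,
 *			.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE,
 *		},
 *		{ .pd_size = 0 },
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt, example_devmap);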
*/ void pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table) { int i; pmap_devmap_table = table; for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { #ifdef VERBOSE_INIT_ARM printf("devmap: %08lx -> %08lx @ %08lx\n", pmap_devmap_table[i].pd_pa, pmap_devmap_table[i].pd_pa + pmap_devmap_table[i].pd_size - 1, pmap_devmap_table[i].pd_va); #endif pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va, pmap_devmap_table[i].pd_pa, pmap_devmap_table[i].pd_size, pmap_devmap_table[i].pd_prot, pmap_devmap_table[i].pd_cache); } } const struct pmap_devmap * pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size) { int i; if (pmap_devmap_table == NULL) return (NULL); for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { if (pa >= pmap_devmap_table[i].pd_pa && pa + size <= pmap_devmap_table[i].pd_pa + pmap_devmap_table[i].pd_size) return (&pmap_devmap_table[i]); } return (NULL); } const struct pmap_devmap * pmap_devmap_find_va(vm_offset_t va, vm_size_t size) { int i; if (pmap_devmap_table == NULL) return (NULL); for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { if (va >= pmap_devmap_table[i].pd_va && va + size <= pmap_devmap_table[i].pd_va + pmap_devmap_table[i].pd_size) return (&pmap_devmap_table[i]); } return (NULL); } Index: stable/6/sys/i386/i386/pmap.c =================================================================== --- stable/6/sys/i386/i386/pmap.c (revision 173366) +++ stable/6/sys/i386/i386/pmap.c (revision 173367) @@ -1,3339 +1,3337 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2005 Alan L. Cox * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * In addition to hardware address maps, this * module is called upon to provide software-use-only * maps which may or may not be stored in the same * form as hardware maps. These pseudo-maps are * used to store intermediate results from copy * operations to and from address spaces. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
*/ #include "opt_cpu.h" #include "opt_pmap.h" #include "opt_msgbuf.h" #include "opt_xbox.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #ifdef XBOX #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) #define CPU_ENABLE_SSE #endif #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if defined(DIAGNOSTIC) #define PMAP_DIAGNOSTIC #endif #if !defined(PMAP_DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif /* * Get PDEs and PTEs for user/kernel address space */ #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ atomic_clear_int((u_int *)(pte), PG_W)) #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) struct pmap kernel_pmap_store; LIST_HEAD(pmaplist, pmap); static struct pmaplist allpmaps; static struct mtx allpmaps_lock; vm_paddr_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ int pgeflag = 0; /* PG_G or-in */ int pseflag = 0; /* PG_PS or-in */ static int nkpt; vm_offset_t kernel_vm_end; extern u_int32_t KERNend; #ifdef PAE static uma_zone_t pdptzone; #endif /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static struct vm_object pvzone_obj; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; int pmap_pagedaemon_waken; /* * All those kernel PT submaps that BSD is so fond of */ struct sysmaps { struct mtx lock; pt_entry_t *CMAP1; pt_entry_t *CMAP2; caddr_t CADDR1; caddr_t CADDR2; }; static struct sysmaps sysmaps_pcpu[MAXCPU]; pt_entry_t *CMAP1 = 0; static pt_entry_t *CMAP3; caddr_t CADDR1 = 0, ptvmmap = 0; static caddr_t CADDR3; struct msgbuf *msgbufp = 0; /* * Crashdump maps. 
*/ static caddr_t crashdumpmap; #ifdef SMP extern pt_entry_t *SMPpt; #endif static pt_entry_t *PMAP1 = 0, *PMAP2; static pt_entry_t *PADDR1 = 0, *PADDR2; #ifdef SMP static int PMAP1cpu; static int PMAP1changedcpu; SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, &PMAP1changedcpu, 0, "Number of times pmap_pte_quick changed CPU with same PMAP1"); #endif static int PMAP1changed; SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, &PMAP1changed, 0, "Number of times pmap_pte_quick changed PMAP1"); static int PMAP1unchanged; SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, &PMAP1unchanged, 0, "Number of times pmap_pte_quick didn't change PMAP1"); static struct mtx PMAP2mutex; static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(void); static void pmap_clear_ptes(vm_page_t m, int bit); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte); static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, vm_page_t *free); static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); static void pmap_pte_release(pt_entry_t *pte); static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); static vm_offset_t pmap_kmem_choose(vm_offset_t addr); #ifdef PAE static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); #endif CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); /* * Move the kernel virtual free pointer to the next * 4MB. This is used to help improve performance * by using a large (4MB) page for much of the kernel * (.text, .data, .bss) */ static vm_offset_t pmap_kmem_choose(vm_offset_t addr) { vm_offset_t newaddr = addr; #ifndef DISABLE_PSE if (cpu_feature & CPUID_PSE) newaddr = (addr + PDRMASK) & ~PDRMASK; #endif return newaddr; } /* * Bootstrap the system enough to run with virtual memory. * * On the i386 this is called after mapping has already been enabled * and just syncs the pmap module with what has already been done. * [We can't call it easily with mapping off since the kernel is not * mapped with PA == VA, hence we would have to relocate every address * from the linked base (virtual) address "KERNBASE" to the actual * (physical) address starting relative to 0] */ void pmap_bootstrap(firstaddr, loadaddr) vm_paddr_t firstaddr; vm_paddr_t loadaddr; { vm_offset_t va; pt_entry_t *pte, *unused; struct sysmaps *sysmaps; int i; /* * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too * large. It should instead be correctly calculated in locore.s and * not based on 'first' (which is a physical address, not a virtual * address, for the start of unused physical memory). The kernel * page tables are NOT double mapped and thus should not be included * in this calculation. 
*/ virtual_avail = (vm_offset_t) KERNBASE + firstaddr; virtual_avail = pmap_kmem_choose(virtual_avail); virtual_end = VM_MAX_KERNEL_ADDRESS; /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); #ifdef PAE kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); #endif kernel_pmap->pm_active = -1; /* don't allow deactivation */ TAILQ_INIT(&kernel_pmap->pm_pvlist); LIST_INIT(&allpmaps); mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); mtx_lock_spin(&allpmaps_lock); LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); nkpt = NKPT; /* * Reserve some special page table entries/VA space for temporary * mapping of pages. */ #define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); va = virtual_avail; pte = vtopte(va); /* * CMAP1/CMAP2 are used for zeroing and copying pages. * CMAP3 is used for the idle process page zeroing. */ for (i = 0; i < MAXCPU; i++) { sysmaps = &sysmaps_pcpu[i]; mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) } SYSMAP(caddr_t, CMAP1, CADDR1, 1) SYSMAP(caddr_t, CMAP3, CADDR3, 1) *CMAP3 = 0; /* * Crashdump maps. */ SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) /* * ptvmmap is used for reading arbitrary physical pages via /dev/mem. */ SYSMAP(caddr_t, unused, ptvmmap, 1) /* * msgbufp is used to map the system message buffer. */ SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE))) /* * ptemap is used for pmap_pte_quick */ SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1); SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1); mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); virtual_avail = va; *CMAP1 = 0; #ifdef XBOX /* FIXME: This is gross, but needed for the XBOX. Since we are in such * an early stadium, we cannot yet neatly map video memory ... :-( * Better fixes are very welcome! */ if (!arch_i386_is_xbox) #endif for (i = 0; i < NKPT; i++) PTD[i] = 0; /* Initialize the PAT MSR if present. */ pmap_init_pat(); /* Turn on PG_G on kernel page(s) */ pmap_set_pg(); } /* * Setup the PAT MSR. */ void pmap_init_pat(void) { uint64_t pat_msr; /* Bail if this CPU doesn't implement PAT. */ if (!(cpu_feature & CPUID_PAT)) return; #ifdef PAT_WORKS /* * Leave the indices 0-3 at the default of WB, WT, UC, and UC-. * Program 4 and 5 as WP and WC. * Leave 6 and 7 as UC and UC-. */ pat_msr = rdmsr(MSR_PAT); pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | PAT_VALUE(5, PAT_WRITE_COMBINING); #else /* * Due to some Intel errata, we can only safely use the lower 4 * PAT entries. Thus, just replace PAT Index 2 with WC instead * of UC-. * * Intel Pentium III Processor Specification Update * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B * or Mode C Paging) * * Intel Pentium IV Processor Specification Update * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) */ pat_msr = rdmsr(MSR_PAT); pat_msr &= ~PAT_MASK(2); pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); #endif wrmsr(MSR_PAT, pat_msr); } /* * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on. 
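 *
 * PG_G marks a mapping as global, so it survives the TLB flush caused
 * by reloading %cr3 on a context switch; that is only safe for
 * mappings, such as these kernel mappings, that are identical in every
 * address space. On CPUs without PGE support pgeflag is 0 and the
 * routine returns without doing anything.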
*/ void pmap_set_pg(void) { pd_entry_t pdir; pt_entry_t *pte; vm_offset_t va, endva; int i; if (pgeflag == 0) return; i = KERNLOAD/NBPDR; endva = KERNBASE + KERNend; if (pseflag) { va = KERNBASE + KERNLOAD; while (va < endva) { pdir = kernel_pmap->pm_pdir[KPTDI+i]; pdir |= pgeflag; kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir; invltlb(); /* Play it safe, invltlb() every time */ i++; va += NBPDR; } } else { va = (vm_offset_t)btext; while (va < endva) { pte = vtopte(va); if (*pte) *pte |= pgeflag; invltlb(); /* Play it safe, invltlb() every time */ va += PAGE_SIZE; } } } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } #ifdef PAE static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt"); static void * pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { *flags = UMA_SLAB_PRIV; return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL, 1, 0)); } #endif /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); #ifdef PAE pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, UMA_ZONE_VM | UMA_ZONE_NOFREE); uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); #endif } void pmap_init2() { } /*************************************************** * Low level helper routines..... ***************************************************/ /* * Determine the appropriate bits to set in a PTE or PDE for a specified * caching mode. */ static int pmap_cache_bits(int mode, boolean_t is_pde) { int pat_flag, pat_index, cache_bits; /* The PAT bit is different for PTE's and PDE's. */ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; /* If we don't support PAT, map extended modes to older ones. */ if (!(cpu_feature & CPUID_PAT)) { switch (mode) { case PAT_UNCACHEABLE: case PAT_WRITE_THROUGH: case PAT_WRITE_BACK: break; case PAT_UNCACHED: case PAT_WRITE_COMBINING: case PAT_WRITE_PROTECTED: mode = PAT_UNCACHEABLE; break; } } /* Map the caching mode to a PAT index. */ switch (mode) { #ifdef PAT_WORKS case PAT_UNCACHEABLE: pat_index = 3; break; case PAT_WRITE_THROUGH: pat_index = 1; break; case PAT_WRITE_BACK: pat_index = 0; break; case PAT_UNCACHED: pat_index = 2; break; case PAT_WRITE_COMBINING: pat_index = 5; break; case PAT_WRITE_PROTECTED: pat_index = 4; break; #else case PAT_UNCACHED: case PAT_UNCACHEABLE: case PAT_WRITE_PROTECTED: pat_index = 3; break; case PAT_WRITE_THROUGH: pat_index = 1; break; case PAT_WRITE_BACK: pat_index = 0; break; case PAT_WRITE_COMBINING: pat_index = 2; break; #endif default: panic("Unknown caching mode %d\n", mode); } /* Map the 3-bit index value into the PAT, PCD, and PWT bits. 
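 *
 * Bit 2 of the index selects the PAT bit (PG_PTE_PAT or PG_PDE_PAT,
 * depending on whether a PTE or a PDE is being built), bit 1 selects
 * PG_NC_PCD and bit 0 selects PG_NC_PWT.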
*/ cache_bits = 0; if (pat_index & 0x4) cache_bits |= pat_flag; if (pat_index & 0x2) cache_bits |= PG_NC_PCD; if (pat_index & 0x1) cache_bits |= PG_NC_PWT; return (cache_bits); } #ifdef SMP /* * For SMP, these functions have to use the IPI mechanism for coherence. */ void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { u_int cpumask; u_int other_cpus; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { invlpg(va); smp_invlpg(va); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) invlpg(va); if (pmap->pm_active & other_cpus) smp_masked_invlpg(pmap->pm_active & other_cpus, va); } sched_unpin(); } void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { u_int cpumask; u_int other_cpus; vm_offset_t addr; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); smp_invlpg_range(sva, eva); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); if (pmap->pm_active & other_cpus) smp_masked_invlpg_range(pmap->pm_active & other_cpus, sva, eva); } sched_unpin(); } void pmap_invalidate_all(pmap_t pmap) { u_int cpumask; u_int other_cpus; sched_pin(); if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { invltlb(); smp_invltlb(); } else { cpumask = PCPU_GET(cpumask); other_cpus = PCPU_GET(other_cpus); if (pmap->pm_active & cpumask) invltlb(); if (pmap->pm_active & other_cpus) smp_masked_invltlb(pmap->pm_active & other_cpus); } sched_unpin(); } void pmap_invalidate_cache(void) { sched_pin(); wbinvd(); smp_cache_flush(); sched_unpin(); } #else /* !SMP */ /* * Normal, non-SMP, 486+ invalidation functions. * We inline these within pmap.c for speed. */ PMAP_INLINE void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { if (pmap == kernel_pmap || pmap->pm_active) invlpg(va); } PMAP_INLINE void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; if (pmap == kernel_pmap || pmap->pm_active) for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); } PMAP_INLINE void pmap_invalidate_all(pmap_t pmap) { if (pmap == kernel_pmap || pmap->pm_active) invltlb(); } PMAP_INLINE void pmap_invalidate_cache(void) { wbinvd(); } #endif /* !SMP */ /* * Are we current address space or kernel? N.B. We return FALSE when * a pmap's page table is in use because a kernel thread is borrowing * it. The borrowed page table can change spontaneously, making any * dependence on its continued use subject to a race condition. */ static __inline int pmap_is_current(pmap_t pmap) { return (pmap == kernel_pmap || (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); } /* * If the given pmap is not the current or kernel pmap, the returned pte must * be released by passing it to pmap_pte_release(). */ pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va) { pd_entry_t newpf; pd_entry_t *pde; pde = pmap_pde(pmap, va); if (*pde & PG_PS) return (pde); if (*pde != 0) { /* are we current address space or kernel? */ if (pmap_is_current(pmap)) return (vtopte(va)); mtx_lock(&PMAP2mutex); newpf = *pde & PG_FRAME; if ((*PMAP2 & PG_FRAME) != newpf) { *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); } return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); } return (0); } /* * Releases a pte that was obtained from pmap_pte(). 
Be prepared for the pte * being NULL. */ static __inline void pmap_pte_release(pt_entry_t *pte) { if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) mtx_unlock(&PMAP2mutex); } static __inline void invlcaddr(void *caddr) { invlpg((u_int)caddr); } /* * Super fast pmap_pte routine best used when scanning * the pv lists. This eliminates many coarse-grained * invltlb calls. Note that many of the pv list * scans are across different pmaps. It is very wasteful * to do an entire invltlb for checking a single mapping. * * If the given pmap is not the current pmap, vm_page_queue_mtx * must be held and curthread pinned to a CPU. */ static pt_entry_t * pmap_pte_quick(pmap_t pmap, vm_offset_t va) { pd_entry_t newpf; pd_entry_t *pde; pde = pmap_pde(pmap, va); if (*pde & PG_PS) return (pde); if (*pde != 0) { /* are we current address space or kernel? */ if (pmap_is_current(pmap)) return (vtopte(va)); mtx_assert(&vm_page_queue_mtx, MA_OWNED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); newpf = *pde & PG_FRAME; if ((*PMAP1 & PG_FRAME) != newpf) { *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; #ifdef SMP PMAP1cpu = PCPU_GET(cpuid); #endif invlcaddr(PADDR1); PMAP1changed++; } else #ifdef SMP if (PMAP1cpu != PCPU_GET(cpuid)) { PMAP1cpu = PCPU_GET(cpuid); invlcaddr(PADDR1); PMAP1changedcpu++; } else #endif PMAP1unchanged++; return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); } return (0); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { vm_paddr_t rtval; pt_entry_t *pte; pd_entry_t pde; rtval = 0; PMAP_LOCK(pmap); pde = pmap->pm_pdir[va >> PDRSHIFT]; if (pde != 0) { if ((pde & PG_PS) != 0) { rtval = (pde & ~PDRMASK) | (va & PDRMASK); PMAP_UNLOCK(pmap); return rtval; } pte = pmap_pte(pmap, va); rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); pmap_pte_release(pte); } PMAP_UNLOCK(pmap); return (rtval); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pd_entry_t pde; pt_entry_t pte; vm_page_t m; m = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); pde = *pmap_pde(pmap, va); if (pde != 0) { if (pde & PG_PS) { if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) | (va & PDRMASK)); vm_page_hold(m); } } else { sched_pin(); pte = *pmap_pte_quick(pmap, va); if (pte != 0 && ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { m = PHYS_TO_VM_PAGE(pte & PG_FRAME); vm_page_hold(m); } sched_unpin(); } } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); return (m); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a wired page to the kva. * Note: not SMP coherent. */ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V | pgeflag); } PMAP_INLINE void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); } /* * Remove a page from the kernel pagetables. * Note: not SMP coherent. 
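 * The caller is responsible for any TLB invalidation; pmap_qremove()
 * below, for instance, removes a batch of pages with pmap_kremove()
 * and then issues a single pmap_invalidate_range().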
*/ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; pte = vtopte(va); pte_clear(pte); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { vm_offset_t va, sva; va = sva = *virt; while (start < end) { pmap_kenter(va, start); va += PAGE_SIZE; start += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); *virt = va; return (sva); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pt_entry_t *endpte, oldpte, *pte; oldpte = 0; pte = vtopte(sva); endpte = pte + count; while (pte < endpte) { oldpte |= *pte; pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V); pte++; ma++; } if ((oldpte & PG_V) != 0) pmap_invalidate_range(kernel_pmap, sva, sva + count * PAGE_SIZE); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... ***************************************************/ static PMAP_INLINE void pmap_free_zero_pages(vm_page_t free) { vm_page_t m; while (free != NULL) { m = free; free = m->right; vm_page_free_zero(m); } } /* * This routine unholds page table pages, and if the hold count * drops to zero, then it decrements the wire count. */ static PMAP_INLINE int pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) { --m->wire_count; if (m->wire_count == 0) return _pmap_unwire_pte_hold(pmap, m, free); else return 0; } static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) { vm_offset_t pteva; /* * unmap the page table page */ pmap->pm_pdir[m->pindex] = 0; --pmap->pm_stats.resident_count; atomic_subtract_int(&cnt.v_wire_count, 1); /* * Do an invltlb to make the invalidated mapping * take effect immediately. */ pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); pmap_invalidate_page(pmap, pteva); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ m->right = *free; *free = m; return 1; } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. 
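 *
 * This only applies to user virtual addresses; for kernel addresses
 * (va at or above VM_MAXUSER_ADDRESS) page table pages are never
 * freed and the routine simply returns 0.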
*/ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) { pd_entry_t ptepde; vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) return 0; ptepde = *pmap_pde(pmap, va); mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); return pmap_unwire_pte_hold(pmap, mpte, free); } void pmap_pinit0(pmap) struct pmap *pmap; { PMAP_LOCK_INIT(pmap); pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); #ifdef PAE pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); #endif pmap->pm_active = 0; PCPU_SET(curpmap, pmap); TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); mtx_lock_spin(&allpmaps_lock); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ void pmap_pinit(pmap) register struct pmap *pmap; { vm_page_t m, ptdpg[NPGPTD]; vm_paddr_t pa; static int color; int i; PMAP_LOCK_INIT(pmap); /* * No need to allocate page table space yet but we do need a valid * page directory table. */ if (pmap->pm_pdir == NULL) { pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, NBPTD); #ifdef PAE pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); KASSERT(((vm_offset_t)pmap->pm_pdpt & ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, ("pmap_pinit: pdpt misaligned")); KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), ("pmap_pinit: pdpt above 4g")); #endif } /* * allocate the page directory page(s) */ for (i = 0; i < NPGPTD;) { m = vm_page_alloc(NULL, color++, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (m == NULL) VM_WAIT; else { ptdpg[i++] = m; } } pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); for (i = 0; i < NPGPTD; i++) { if ((ptdpg[i]->flags & PG_ZERO) == 0) bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE); } mtx_lock_spin(&allpmaps_lock); LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); /* Wire in kernel global address entries. */ /* XXX copies current process, does not fill in MPPTDI */ bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); #ifdef SMP pmap->pm_pdir[MPPTDI] = PTD[MPPTDI]; #endif /* install self-referential address mapping entry(s) */ for (i = 0; i < NPGPTD; i++) { pa = VM_PAGE_TO_PHYS(ptdpg[i]); pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M; #ifdef PAE pmap->pm_pdpt[i] = pa | PG_V; #endif } pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } /* * this routine is called if the page table page is not * mapped correctly. */ static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) { vm_paddr_t ptepa; vm_page_t m; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (flags & M_WAITOK) { PMAP_UNLOCK(pmap); vm_page_unlock_queues(); VM_WAIT; vm_page_lock_queues(); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. 
*/ pmap->pm_stats.resident_count++; ptepa = VM_PAGE_TO_PHYS(m); pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); return m; } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) { unsigned ptepindex; pd_entry_t ptepa; vm_page_t m; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); /* * Calculate pagetable page index */ ptepindex = va >> PDRSHIFT; retry: /* * Get the page directory entry */ ptepa = pmap->pm_pdir[ptepindex]; /* * This supports switching from a 4MB page to a * normal 4K page. */ if (ptepa & PG_PS) { pmap->pm_pdir[ptepindex] = 0; ptepa = 0; pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; pmap_invalidate_all(kernel_pmap); } /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (ptepa) { m = PHYS_TO_VM_PAGE(ptepa); m->wire_count++; } else { /* * Here if the pte page isn't mapped, or if it has * been deallocated. */ m = _pmap_allocpte(pmap, ptepindex, flags); if (m == NULL && (flags & M_WAITOK)) goto retry; } return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ #ifdef SMP /* * Deal with a SMP shootdown of other users of the pmap that we are * trying to dispose of. This can be a bit hairy. */ static u_int *lazymask; static u_int lazyptd; static volatile u_int lazywait; void pmap_lazyfix_action(void); void pmap_lazyfix_action(void) { u_int mymask = PCPU_GET(cpumask); if (rcr3() == lazyptd) load_cr3(PCPU_GET(curpcb)->pcb_cr3); atomic_clear_int(lazymask, mymask); atomic_store_rel_int(&lazywait, 1); } static void pmap_lazyfix_self(u_int mymask) { if (rcr3() == lazyptd) load_cr3(PCPU_GET(curpcb)->pcb_cr3); atomic_clear_int(lazymask, mymask); } static void pmap_lazyfix(pmap_t pmap) { u_int mymask; u_int mask; register u_int spins; while ((mask = pmap->pm_active) != 0) { spins = 50000000; mask = mask & -mask; /* Find least significant set bit */ mtx_lock_spin(&smp_ipi_mtx); #ifdef PAE lazyptd = vtophys(pmap->pm_pdpt); #else lazyptd = vtophys(pmap->pm_pdir); #endif mymask = PCPU_GET(cpumask); if (mask == mymask) { lazymask = &pmap->pm_active; pmap_lazyfix_self(mymask); } else { atomic_store_rel_int((u_int *)&lazymask, (u_int)&pmap->pm_active); atomic_store_rel_int(&lazywait, 0); ipi_selected(mask, IPI_LAZYPMAP); while (lazywait == 0) { ia32_pause(); if (--spins == 0) break; } } mtx_unlock_spin(&smp_ipi_mtx); if (spins == 0) printf("pmap_lazyfix: spun for 50000000\n"); } } #else /* SMP */ /* * Cleaning up on uniprocessor is easy. For various reasons, we're * unlikely to have to even execute this code, including the fact * that the cleanup is deferred until the parent does a wait(2), which * means that another userland process has run. */ static void pmap_lazyfix(pmap_t pmap) { u_int cr3; cr3 = vtophys(pmap->pm_pdir); if (cr3 == rcr3()) { load_cr3(PCPU_GET(curpcb)->pcb_cr3); pmap->pm_active &= ~(PCPU_GET(cpumask)); } } #endif /* SMP */ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. 
*/ void pmap_release(pmap_t pmap) { vm_page_t m, ptdpg[NPGPTD]; int i; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); pmap_lazyfix(pmap); mtx_lock_spin(&allpmaps_lock); LIST_REMOVE(pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); for (i = 0; i < NPGPTD; i++) ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i]); bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) * sizeof(*pmap->pm_pdir)); #ifdef SMP pmap->pm_pdir[MPPTDI] = 0; #endif pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); vm_page_lock_queues(); for (i = 0; i < NPGPTD; i++) { m = ptdpg[i]; #ifdef PAE KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), ("pmap_release: got wrong ptd page")); #endif m->wire_count--; atomic_subtract_int(&cnt.v_wire_count, 1); vm_page_free_zero(m); } vm_page_unlock_queues(); PMAP_LOCK_DESTROY(pmap); } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "IU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "IU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { struct pmap *pmap; vm_paddr_t ptppaddr; vm_page_t nkpg; pd_entry_t newpdir; pt_entry_t *pde; mtx_assert(&kernel_map->system_mtx, MA_OWNED); if (kernel_vm_end == 0) { kernel_vm_end = KERNBASE; nkpt = 0; while (pdir_pde(PTD, kernel_vm_end)) { kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); nkpt++; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } addr = roundup2(addr, PAGE_SIZE * NPTEPG); if (addr - 1 >= kernel_map->max_offset) addr = kernel_map->max_offset; while (kernel_vm_end < addr) { if (pdir_pde(PTD, kernel_vm_end)) { kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } continue; } /* * This index is bogus, but out of the way */ nkpg = vm_page_alloc(NULL, nkpt, VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); nkpt++; pmap_zero_page(nkpg); ptppaddr = VM_PAGE_TO_PHYS(nkpg); newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); pdir_pde(PTD, kernel_vm_end) = newpdir; mtx_lock_spin(&allpmaps_lock); LIST_FOREACH(pmap, &allpmaps, pm_list) { pde = pmap_pde(pmap, kernel_vm_end); pde_store(pde, newpdir); } mtx_unlock_spin(&allpmaps_lock); kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. 
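 *
 * When pv_entry_count climbs past pv_entry_high_water the page daemon
 * is woken (via vm_pages_needed) so the system can start reclaiming
 * memory; the allocation itself uses M_NOWAIT and may return NULL,
 * which pmap_insert_entry() turns into a panic suggesting a larger
 * vm.pmap.shpgperproc.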
*/ static pv_entry_t get_pv_entry(void) { pv_entry_count++; if ((pv_entry_count > pv_entry_high_water) && (pmap_pagedaemon_waken == 0)) { pmap_pagedaemon_waken = 1; wakeup (&vm_pages_needed); } return uma_zalloc(pvzone, M_NOWAIT); } static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } KASSERT(pv != NULL, ("pmap_remove_entry: pv not found")); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(); if (pv == NULL) panic("no pv entries: increase vm.pmap.shpgperproc"); pv->pv_va = va; pv->pv_pmap = pmap; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Conditionally create a pv entry. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { pv_entry_count++; pv->pv_va = va; pv->pv_pmap = pmap; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; return (TRUE); } else return (FALSE); } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) { pt_entry_t oldpte; vm_page_t m; mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpte = pte_load_clear(ptq); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; /* * Machines that don't support invlpg, also don't support * PG_G. */ if (oldpte & PG_G) pmap_invalidate_page(kernel_pmap, va); pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte); if (oldpte & PG_M) { KASSERT((oldpte & PG_RW), ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx", va, (uintmax_t)oldpte)); vm_page_dirty(m); } if (oldpte & PG_A) vm_page_flag_set(m, PG_REFERENCED); pmap_remove_entry(pmap, m, va); } return (pmap_unuse_pt(pmap, va, free)); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va) { pt_entry_t *pte; vm_page_t free = NULL; mtx_assert(&vm_page_queue_mtx, MA_OWNED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) return; pmap_remove_pte(pmap, pte, va, &free); pmap_invalidate_page(pmap, va); pmap_free_zero_pages(free); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. 
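 *
 * A caller holding an arbitrary byte range would round it first, along
 * the lines of the following sketch (not a call site from this file):
 *
 *	pmap_remove(pmap, trunc_page(start), round_page(end));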
*/ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t pdnxt; pd_entry_t ptpaddr; pt_entry_t *pte; vm_page_t free = NULL; int anyvalid; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; anyvalid = 0; vm_page_lock_queues(); sched_pin(); PMAP_LOCK(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. */ if ((sva + PAGE_SIZE == eva) && ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { pmap_remove_page(pmap, sva); goto out; } for (; sva < eva; sva = pdnxt) { unsigned pdirindex; /* * Calculate index for next page table. */ pdnxt = (sva + NBPDR) & ~PDRMASK; if (pmap->pm_stats.resident_count == 0) break; pdirindex = sva >> PDRSHIFT; ptpaddr = pmap->pm_pdir[pdirindex]; /* * Weed out invalid mappings. Note: we assume that the page * directory table is always allocated, and in kernel virtual. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { pmap->pm_pdir[pdirindex] = 0; pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; anyvalid = 1; continue; } /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. */ if (pdnxt > eva) pdnxt = eva; for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, sva += PAGE_SIZE) { if (*pte == 0) continue; /* * The TLB entry for a PG_G mapping is invalidated * by pmap_remove_pte(). */ if ((*pte & PG_G) == 0) anyvalid = 1; if (pmap_remove_pte(pmap, pte, sva, &free)) break; } } out: sched_unpin(); if (anyvalid) { pmap_invalidate_all(pmap); pmap_free_zero_pages(free); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { register pv_entry_t pv; pt_entry_t *pte, tpte; vm_page_t free; #if defined(PMAP_DIAGNOSTIC) /* * XXX This makes pmap_remove_all() illegal for non-managed pages! */ if (m->flags & PG_FICTITIOUS) { panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m)); } #endif mtx_assert(&vm_page_queue_mtx, MA_OWNED); sched_pin(); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { PMAP_LOCK(pv->pv_pmap); pv->pv_pmap->pm_stats.resident_count--; pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); tpte = pte_load_clear(pte); if (tpte & PG_W) pv->pv_pmap->pm_stats.wired_count--; if (tpte & PG_A) vm_page_flag_set(m, PG_REFERENCED); /* * Update the vm_page_t clean and reference bits. */ if (tpte & PG_M) { KASSERT((tpte & PG_RW), ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx", pv->pv_va, (uintmax_t)tpte)); vm_page_dirty(m); } free = NULL; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, &free); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); pmap_free_zero_pages(free); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; PMAP_UNLOCK(pv->pv_pmap); free_pv_entry(pv); } vm_page_flag_clear(m, PG_WRITEABLE); sched_unpin(); } /* * Set the physical protection on the * specified range of this map as requested. 
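 *
 * Protection can only be reduced here: a request that strips
 * VM_PROT_READ removes the range outright, and one that adds
 * VM_PROT_WRITE is a no-op.  PG_RW (together with PG_M) is cleared
 * with an atomic compare-and-set and retried on failure, so a
 * concurrently set dirty bit is latched into the vm_page via
 * vm_page_dirty() rather than lost.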
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t pdnxt; pd_entry_t ptpaddr; pt_entry_t *pte; int anychanged; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if (prot & VM_PROT_WRITE) return; anychanged = 0; vm_page_lock_queues(); sched_pin(); PMAP_LOCK(pmap); for (; sva < eva; sva = pdnxt) { unsigned obits, pbits, pdirindex; pdnxt = (sva + NBPDR) & ~PDRMASK; pdirindex = sva >> PDRSHIFT; ptpaddr = pmap->pm_pdir[pdirindex]; /* * Weed out invalid mappings. Note: we assume that the page * directory table is always allocated, and in kernel virtual. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); anychanged = 1; continue; } if (pdnxt > eva) pdnxt = eva; for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, sva += PAGE_SIZE) { vm_page_t m; retry: /* * Regardless of whether a pte is 32 or 64 bits in * size, PG_RW, PG_A, and PG_M are among the least * significant 32 bits. */ obits = pbits = *(u_int *)pte; if (pbits & PG_MANAGED) { m = NULL; if (pbits & PG_A) { m = PHYS_TO_VM_PAGE(*pte); vm_page_flag_set(m, PG_REFERENCED); pbits &= ~PG_A; } if ((pbits & PG_M) != 0) { if (m == NULL) m = PHYS_TO_VM_PAGE(*pte); vm_page_dirty(m); } } pbits &= ~(PG_RW | PG_M); if (pbits != obits) { if (!atomic_cmpset_int((u_int *)pte, obits, pbits)) goto retry; if (obits & PG_G) pmap_invalidate_page(pmap, sva); else anychanged = 1; } } } sched_unpin(); if (anychanged) pmap_invalidate_all(pmap); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_paddr_t pa; pd_entry_t *pde; register pt_entry_t *pte; vm_paddr_t opa; pt_entry_t origpte, newpte; vm_page_t mpte, om; boolean_t invlva; va &= PG_FRAME; #ifdef PMAP_DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va); #endif mpte = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); sched_pin(); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { mpte = pmap_allocpte(pmap, va, M_WAITOK); } #if 0 && defined(PMAP_DIAGNOSTIC) else { pd_entry_t *pdeaddr = pmap_pde(pmap, va); origpte = *pdeaddr; if ((origpte & PG_V) == 0) { panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n", pmap->pm_pdir[PTDPTDI], origpte, va); } } #endif pde = pmap_pde(pmap, va); if ((*pde & PG_PS) != 0) panic("pmap_enter: attempted pmap_enter on 4MB page"); pte = pmap_pte_quick(pmap, va); /* * Page Directory table entry not valid, we need a new PT page */ if (pte == NULL) { panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n", (uintmax_t)pmap->pm_pdir[PTDPTDI], va); } pa = VM_PAGE_TO_PHYS(m); om = NULL; origpte = *pte; opa = origpte & PG_FRAME; /* * Mapping has not changed, must be protection or wiring change. */ if (origpte && (opa == pa)) { /* * Wiring change, just update stats. 
We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if (wired && ((origpte & PG_W) == 0)) pmap->pm_stats.wired_count++; else if (!wired && (origpte & PG_W)) pmap->pm_stats.wired_count--; /* * Remove extra pte reference */ if (mpte) mpte->wire_count--; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { om = m; pa |= PG_MANAGED; } goto validate; } /* * Mapping has changed, invalidate old range and fall through to * handle validating new mapping. */ if (opa) { if (origpte & PG_W) pmap->pm_stats.wired_count--; if (origpte & PG_MANAGED) { om = PHYS_TO_VM_PAGE(opa); pmap_remove_entry(pmap, om, va); } if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%x", va)); } } else pmap->pm_stats.resident_count++; /* * Enter on the PV list if part of our managed memory. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_insert_entry(pmap, va, m); pa |= PG_MANAGED; } /* * Increment counters */ if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. */ newpte = (pt_entry_t)(pa | PG_V); if ((prot & VM_PROT_WRITE) != 0) newpte |= PG_RW; if (wired) newpte |= PG_W; if (va < VM_MAXUSER_ADDRESS) newpte |= PG_U; if (pmap == kernel_pmap) newpte |= pgeflag; /* * if the mapping or permission bits are different, we need * to update the pte. */ if ((origpte & ~(PG_M|PG_A)) != newpte) { if (origpte & PG_V) { invlva = FALSE; origpte = pte_load_store(pte, newpte | PG_A); if (origpte & PG_A) { if (origpte & PG_MANAGED) vm_page_flag_set(om, PG_REFERENCED); if (opa != VM_PAGE_TO_PHYS(m)) invlva = TRUE; } if (origpte & PG_M) { KASSERT((origpte & PG_RW), ("pmap_enter: modified page not writable: va: %#x, pte: %#jx", va, (uintmax_t)origpte)); if ((origpte & PG_MANAGED) != 0) vm_page_dirty(om); if ((prot & VM_PROT_WRITE) == 0) invlva = TRUE; } if (invlva) pmap_invalidate_page(pmap, va); } else pte_store(pte, newpte | PG_A); } sched_unpin(); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); psize = atop(end - start); mpte = NULL; m = m_start; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot, mpte); m = TAILQ_NEXT(m, listq); } PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. 
Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ -vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { PMAP_LOCK(pmap); - mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte); + (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL); PMAP_UNLOCK(pmap); - return (mpte); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte) { pt_entry_t *pte; vm_paddr_t pa; vm_page_t free; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { unsigned ptepindex; pd_entry_t ptepa; /* * Calculate pagetable page index */ ptepindex = va >> PDRSHIFT; if (mpte && (mpte->pindex == ptepindex)) { mpte->wire_count++; } else { /* * Get the page directory entry */ ptepa = pmap->pm_pdir[ptepindex]; /* * If the page table page is mapped, we just increment * the hold count, and activate it. */ if (ptepa) { if (ptepa & PG_PS) panic("pmap_enter_quick: unexpected mapping into 4MB page"); mpte = PHYS_TO_VM_PAGE(ptepa); mpte->wire_count++; } else { mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT); if (mpte == NULL) return (mpte); } } } else { mpte = NULL; } /* * This call to vtopte makes the assumption that we are * entering the page into the current pmap. In order to support * quick entry into any pmap, one would likely use pmap_pte_quick. * But that isn't as quick as vtopte. */ pte = vtopte(va); if (*pte) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && !pmap_try_insert_pv_entry(pmap, va, m)) { if (mpte != NULL) { free = NULL; if (pmap_unwire_pte_hold(pmap, mpte, &free)) { pmap_invalidate_page(pmap, va); pmap_free_zero_pages(free); } mpte = NULL; } return (mpte); } /* * Increment counters */ pmap->pm_stats.resident_count++; pa = VM_PAGE_TO_PHYS(m); /* * Now validate mapping with RO protection */ if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) pte_store(pte, pa | PG_V | PG_U); else pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); return mpte; } /* * Make a temporary mapping for a physical address. This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_paddr_t pa, int i) { vm_offset_t va; va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); pmap_kenter(va, pa); invlpg(va); return ((void *)crashdumpmap); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. 
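 *
 * Only the superpage case is handled: mappings are created when PSE
 * is available (pseflag) and both addr and size are NBPDR (4MB)
 * aligned, and the backing physical run must itself be NBPDR
 * aligned; each covering page-directory entry is then pointed
 * directly at the physical memory with PG_PS set.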
*/ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { vm_page_t p; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); if (pseflag && ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) { int i; vm_page_t m[1]; unsigned int ptepindex; int npdes; pd_entry_t ptepa; PMAP_LOCK(pmap); if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)]) goto out; PMAP_UNLOCK(pmap); retry: p = vm_page_lookup(object, pindex); if (p != NULL) { vm_page_lock_queues(); if (vm_page_sleep_if_busy(p, FALSE, "init4p")) goto retry; } else { p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL); if (p == NULL) return; m[0] = p; if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) { vm_page_lock_queues(); vm_page_free(p); vm_page_unlock_queues(); return; } p = vm_page_lookup(object, pindex); vm_page_lock_queues(); vm_page_wakeup(p); } vm_page_unlock_queues(); ptepa = VM_PAGE_TO_PHYS(p); if (ptepa & (NBPDR - 1)) return; p->valid = VM_PAGE_BITS_ALL; PMAP_LOCK(pmap); pmap->pm_stats.resident_count += size >> PAGE_SHIFT; npdes = size >> PDRSHIFT; for(i = 0; i < npdes; i++) { pde_store(&pmap->pm_pdir[ptepindex], ptepa | PG_U | PG_RW | PG_V | PG_PS); ptepa += NBPDR; ptepindex += 1; } pmap_invalidate_all(pmap); out: PMAP_UNLOCK(pmap); } } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { register pt_entry_t *pte; PMAP_LOCK(pmap); pte = pmap_pte(pmap, va); if (wired && !pmap_pte_w(pte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_pte_w(pte)) pmap->pm_stats.wired_count--; /* * Wiring is not a hardware characteristic so there is no need to * invalidate TLB. */ pmap_pte_set_w(pte, wired); pmap_pte_release(pte); PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. 
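 *
 * Here the copy is attempted only when dst_addr equals src_addr and
 * the source pmap is the current one.  PG_PS directory entries are
 * shared outright (minus PG_W), ordinary managed mappings are copied
 * with the wired, modified and accessed bits cleared, and unmanaged
 * mappings are skipped.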
*/ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { vm_page_t free; vm_offset_t addr; vm_offset_t end_addr = src_addr + len; vm_offset_t pdnxt; if (dst_addr != src_addr) return; if (!pmap_is_current(src_pmap)) return; vm_page_lock_queues(); if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } sched_pin(); for (addr = src_addr; addr < end_addr; addr = pdnxt) { pt_entry_t *src_pte, *dst_pte; vm_page_t dstmpte, srcmpte; pd_entry_t srcptepaddr; unsigned ptepindex; if (addr >= UPT_MIN_ADDRESS) panic("pmap_copy: invalid to pmap_copy page tables"); pdnxt = (addr + NBPDR) & ~PDRMASK; ptepindex = addr >> PDRSHIFT; srcptepaddr = src_pmap->pm_pdir[ptepindex]; if (srcptepaddr == 0) continue; if (srcptepaddr & PG_PS) { if (dst_pmap->pm_pdir[ptepindex] == 0) { dst_pmap->pm_pdir[ptepindex] = srcptepaddr & ~PG_W; dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; } continue; } srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); if (srcmpte->wire_count == 0) panic("pmap_copy: source page table page is unused"); if (pdnxt > end_addr) pdnxt = end_addr; src_pte = vtopte(addr); while (addr < pdnxt) { pt_entry_t ptetemp; ptetemp = *src_pte; /* * we only virtual copy managed pages */ if ((ptetemp & PG_MANAGED) != 0) { dstmpte = pmap_allocpte(dst_pmap, addr, M_NOWAIT); if (dstmpte == NULL) break; dst_pte = pmap_pte_quick(dst_pmap, addr); if (*dst_pte == 0 && pmap_try_insert_pv_entry(dst_pmap, addr, PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { /* * Clear the wired, modified, and * accessed (referenced) bits * during the copy. */ *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A); dst_pmap->pm_stats.resident_count++; } else { free = NULL; if (pmap_unwire_pte_hold( dst_pmap, dstmpte, &free)) { pmap_invalidate_page(dst_pmap, addr); pmap_free_zero_pages(free); } } if (dstmpte->wire_count >= srcmpte->wire_count) break; } addr += PAGE_SIZE; src_pte++; } } sched_unpin(); vm_page_unlock_queues(); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); } static __inline void pagezero(void *page) { #if defined(I686_CPU) if (cpu_class == CPUCLASS_686) { #if defined(CPU_ENABLE_SSE) if (cpu_feature & CPUID_SSE2) sse2_pagezero(page); else #endif i686_pagezero(page); } else #endif bzero(page, PAGE_SIZE); } /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { struct sysmaps *sysmaps; sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; mtx_lock(&sysmaps->lock); if (*sysmaps->CMAP2) panic("pmap_zero_page: CMAP2 busy"); sched_pin(); *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; invlcaddr(sysmaps->CADDR2); pagezero(sysmaps->CADDR2); *sysmaps->CMAP2 = 0; sched_unpin(); mtx_unlock(&sysmaps->lock); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. 
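 *
 * Like pmap_zero_page() above, this uses the per-CPU sysmaps CMAP2
 * slot: the thread is pinned with sched_pin() while CADDR2 points at
 * the page, so only a local invalidation (invlcaddr) is needed and
 * the temporary mapping never has to be shot down on other CPUs.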
*/ void pmap_zero_page_area(vm_page_t m, int off, int size) { struct sysmaps *sysmaps; sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; mtx_lock(&sysmaps->lock); if (*sysmaps->CMAP2) panic("pmap_zero_page: CMAP2 busy"); sched_pin(); *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; invlcaddr(sysmaps->CADDR2); if (off == 0 && size == PAGE_SIZE) pagezero(sysmaps->CADDR2); else bzero((char *)sysmaps->CADDR2 + off, size); *sysmaps->CMAP2 = 0; sched_unpin(); mtx_unlock(&sysmaps->lock); } /* * pmap_zero_page_idle zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. This * is intended to be called from the vm_pagezero process only and * outside of Giant. */ void pmap_zero_page_idle(vm_page_t m) { if (*CMAP3) panic("pmap_zero_page: CMAP3 busy"); sched_pin(); *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; invlcaddr(CADDR3); pagezero(CADDR3); *CMAP3 = 0; sched_unpin(); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t src, vm_page_t dst) { struct sysmaps *sysmaps; sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; mtx_lock(&sysmaps->lock); if (*sysmaps->CMAP1) panic("pmap_copy_page: CMAP1 busy"); if (*sysmaps->CMAP2) panic("pmap_copy_page: CMAP2 busy"); sched_pin(); invlpg((u_int)sysmaps->CADDR1); invlpg((u_int)sysmaps->CADDR2); *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A; *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M; bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); *sysmaps->CMAP1 = 0; *sysmaps->CMAP2 = 0; sched_unpin(); mtx_unlock(&sysmaps->lock); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap, m) pmap_t pmap; vm_page_t m; { pv_entry_t pv; int loops = 0; if (m->flags & PG_FICTITIOUS) return FALSE; mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { return TRUE; } loops++; if (loops >= 16) break; } return (FALSE); } #define PMAP_REMOVE_PAGES_CURPROC_ONLY /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. 
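 *
 * With PMAP_REMOVE_PAGES_CURPROC_ONLY defined (as it is above) the
 * cheap vtopte() lookup is used and a call with any pmap other than
 * the current process' one merely prints a warning and returns.
 * Wired mappings are deliberately left in place.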
*/ void pmap_remove_pages(pmap, sva, eva) pmap_t pmap; vm_offset_t sva, eva; { pt_entry_t *pte, tpte; vm_page_t m, free = NULL; pv_entry_t pv, npv; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } #endif vm_page_lock_queues(); PMAP_LOCK(pmap); sched_pin(); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { if (pv->pv_va >= eva || pv->pv_va < sva) { npv = TAILQ_NEXT(pv, pv_plist); continue; } #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY pte = vtopte(pv->pv_va); #else pte = pmap_pte_quick(pmap, pv->pv_va); #endif tpte = *pte; if (tpte == 0) { printf("TPTE at %p IS ZERO @ VA %08x\n", pte, pv->pv_va); panic("bad pte"); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { npv = TAILQ_NEXT(pv, pv_plist); continue; } m = PHYS_TO_VM_PAGE(tpte); KASSERT(m->phys_addr == (tpte & PG_FRAME), ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT(m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte)); pmap->pm_stats.resident_count--; pte_clear(pte); /* * Update the vm_page_t clean and reference bits. */ if (tpte & PG_M) { vm_page_dirty(m); } npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); m->md.pv_list_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); pmap_unuse_pt(pmap, pv->pv_va, &free); free_pv_entry(pv); } sched_unpin(); pmap_invalidate_all(pmap); pmap_free_zero_pages(free); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { pv_entry_t pv; pt_entry_t *pte; boolean_t rv; rv = FALSE; if (m->flags & PG_FICTITIOUS) return (rv); sched_pin(); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); rv = (*pte & PG_M) != 0; PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } sched_unpin(); return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pt_entry_t *pte; boolean_t rv; rv = FALSE; PMAP_LOCK(pmap); if (*pmap_pde(pmap, addr)) { pte = vtopte(addr); rv = *pte == 0; } PMAP_UNLOCK(pmap); return (rv); } /* * Clear the given bit in each of the given page's ptes. The bit is * expressed as a 32-bit mask. Consequently, if the pte is 64 bits in * size, only a bit within the least significant 32 can be cleared. */ static __inline void pmap_clear_ptes(vm_page_t m, int bit) { register pv_entry_t pv; pt_entry_t pbits, *pte; if ((m->flags & PG_FICTITIOUS) || (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0)) return; sched_pin(); mtx_assert(&vm_page_queue_mtx, MA_OWNED); /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); retry: pbits = *pte; if (pbits & bit) { if (bit == PG_RW) { /* * Regardless of whether a pte is 32 or 64 bits * in size, PG_RW and PG_M are among the least * significant 32 bits. 
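 *
 * (Under PAE the pte itself is 64 bits wide, but since every bit
 * being changed lives in the low word, the 32-bit cmpset on the
 * first half of the entry is sufficient and the high word is never
 * written.)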
*/ if (!atomic_cmpset_int((u_int *)pte, pbits, pbits & ~(PG_RW | PG_M))) goto retry; if (pbits & PG_M) { vm_page_dirty(m); } } else { atomic_clear_int((u_int *)pte, bit); } pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } PMAP_UNLOCK(pv->pv_pmap); } if (bit == PG_RW) vm_page_flag_clear(m, PG_WRITEABLE); sched_unpin(); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { pmap_clear_ptes(m, PG_RW); } else { pmap_remove_all(m); } } } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { register pv_entry_t pv, pvf, pvn; pt_entry_t *pte; pt_entry_t v; int rtval = 0; if (m->flags & PG_FICTITIOUS) return (rtval); sched_pin(); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pvf = pv; do { pvn = TAILQ_NEXT(pv, pv_list); TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); PMAP_LOCK(pv->pv_pmap); pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); if (pte && ((v = pte_load(pte)) & PG_A) != 0) { atomic_clear_int((u_int *)pte, PG_A); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); rtval++; if (rtval > 4) { PMAP_UNLOCK(pv->pv_pmap); break; } } PMAP_UNLOCK(pv->pv_pmap); } while ((pv = pvn) != NULL && pv != pvf); } sched_unpin(); return (rtval); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { pmap_clear_ptes(m, PG_M); } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { pmap_clear_ptes(m, PG_A); } /* * Miscellaneous support routines follow */ /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. 
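 *
 * Typical use, sketched with a hypothetical register window at
 * physical address pa:
 *
 *	void *regs = pmap_mapdev(pa, PAGE_SIZE);
 *	... access the device through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * pmap_mapdev() maps uncacheable (PAT_UNCACHEABLE); pmap_mapbios()
 * below is the write-back variant intended for firmware tables.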
*/ void * pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) { vm_offset_t va, tmpva, offset; offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); pa = pa & PG_FRAME; if (pa < KERNLOAD && pa + size <= KERNLOAD) va = KERNBASE + pa; else va = kmem_alloc_nofault(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0; ) { pmap_kenter_attr(tmpva, pa, mode); size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, va, tmpva); pmap_invalidate_cache(); return ((void *)(va + offset)); } void * pmap_mapdev(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); } void pmap_unmapdev(va, size) vm_offset_t va; vm_size_t size; { vm_offset_t base, offset, tmpva; if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) return; base = va & PG_FRAME; offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) pmap_kremove(tmpva); pmap_invalidate_range(kernel_pmap, va, tmpva); kmem_free(kernel_map, base, size); } int pmap_change_attr(va, size, mode) vm_offset_t va; vm_size_t size; int mode; { vm_offset_t base, offset, tmpva; pt_entry_t *pte; u_int opte, npte; pd_entry_t *pde; base = va & PG_FRAME; offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); /* Only supported on kernel virtual addresses. */ if (base <= VM_MAXUSER_ADDRESS) return (EINVAL); /* 4MB pages and pages that aren't mapped aren't supported. */ for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { pde = pmap_pde(kernel_pmap, tmpva); if (*pde & PG_PS) return (EINVAL); if (*pde == 0) return (EINVAL); pte = vtopte(va); if (*pte == 0) return (EINVAL); } /* * Ok, all the pages exist and are 4k, so run through them updating * their cache mode. */ for (tmpva = base; size > 0; ) { pte = vtopte(tmpva); /* * The cache mode bits are all in the low 32-bits of the * PTE, so we can just spin on updating the low 32-bits. */ do { opte = *(u_int *)pte; npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT); npte |= pmap_cache_bits(mode, 0); } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); tmpva += PAGE_SIZE; size -= PAGE_SIZE; } /* * Flush CPU caches to make sure any data isn't cached that shouldn't * be, etc. */ pmap_invalidate_range(kernel_pmap, base, tmpva); pmap_invalidate_cache(); return (0); } /* * perform the pmap work for mincore */ int pmap_mincore(pmap, addr) pmap_t pmap; vm_offset_t addr; { pt_entry_t *ptep, pte; vm_page_t m; int val = 0; PMAP_LOCK(pmap); ptep = pmap_pte(pmap, addr); pte = (ptep != NULL) ? 
*ptep : 0; pmap_pte_release(ptep); PMAP_UNLOCK(pmap); if (pte != 0) { vm_paddr_t pa; val = MINCORE_INCORE; if ((pte & PG_MANAGED) == 0) return val; pa = pte & PG_FRAME; m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if (pte & PG_M) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; else { /* * Modified by someone else */ vm_page_lock_queues(); if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; vm_page_unlock_queues(); } /* * Referenced by us */ if (pte & PG_A) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; else { /* * Referenced by someone else */ vm_page_lock_queues(); if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } vm_page_unlock_queues(); } } return val; } void pmap_activate(struct thread *td) { pmap_t pmap, oldpmap; u_int32_t cr3; critical_enter(); pmap = vmspace_pmap(td->td_proc->p_vmspace); oldpmap = PCPU_GET(curpmap); #if defined(SMP) atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask)); atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); #else oldpmap->pm_active &= ~1; pmap->pm_active |= 1; #endif #ifdef PAE cr3 = vtophys(pmap->pm_pdpt); #else cr3 = vtophys(pmap->pm_pdir); #endif /* * pmap_activate is for the current thread on the current cpu */ td->td_pcb->pcb_cr3 = cr3; load_cr3(cr3); PCPU_SET(curpmap, pmap); critical_exit(); } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) { return addr; } addr = (addr + PDRMASK) & ~PDRMASK; return addr; } #if defined(PMAP_DEBUG) pmap_pid_dump(int pid) { pmap_t pmap; struct proc *p; int npte = 0; int index; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_pid != pid) continue; if (p->p_vmspace) { int i,j; index = 0; pmap = vmspace_pmap(p->p_vmspace); for (i = 0; i < NPDEPTD; i++) { pd_entry_t *pde; pt_entry_t *pte; vm_offset_t base = i << PDRSHIFT; pde = &pmap->pm_pdir[i]; if (pde && pmap_pde_v(pde)) { for (j = 0; j < NPTEPG; j++) { vm_offset_t va = base + (j << PAGE_SHIFT); if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { if (index) { index = 0; printf("\n"); } sx_sunlock(&allproc_lock); return npte; } pte = pmap_pte(pmap, va); if (pte && pmap_pte_v(pte)) { pt_entry_t pa; vm_page_t m; pa = *pte; m = PHYS_TO_VM_PAGE(pa); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; if (index >= 2) { index = 0; printf("\n"); } else { printf(" "); } } } } } } } sx_sunlock(&allproc_lock); return npte; } #endif #if defined(DEBUG) static void pads(pmap_t pm); void pmap_pvdump(vm_offset_t pa); /* print address space of pmap*/ static void pads(pm) pmap_t pm; { int i, j; vm_paddr_t va; pt_entry_t *ptep; if (pm == kernel_pmap) return; for (i = 0; i < NPDEPTD; i++) if (pm->pm_pdir[i]) for (j = 0; j < NPTEPG; j++) { va = (i << PDRSHIFT) + (j << PAGE_SHIFT); if (pm == kernel_pmap && va < KERNBASE) continue; if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) continue; ptep = pmap_pte(pm, va); if (pmap_pte_v(ptep)) printf("%x:%x ", va, *ptep); }; } void pmap_pvdump(pa) vm_paddr_t pa; { pv_entry_t pv; vm_page_t m; printf("pa %x", pa); m = PHYS_TO_VM_PAGE(pa); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); pads(pv->pv_pmap); } printf(" "); } #endif Index: stable/6/sys/ia64/ia64/pmap.c =================================================================== --- stable/6/sys/ia64/ia64/pmap.c (revision 173366) +++ stable/6/sys/ia64/ia64/pmap.c 
(revision 173367) @@ -1,2412 +1,2410 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 1998,2000 Doug Rabson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp * with some ideas from NetBSD's alpha pmap */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Manages physical address maps. * * In addition to hardware address maps, this * module is called upon to provide software-use-only * maps which may or may not be stored in the same * form as hardware maps. These pseudo-maps are * used to store intermediate results from copy * operations to and from address spaces. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
*/ /* * Following the Linux model, region IDs are allocated in groups of * eight so that a single region ID can be used for as many RRs as we * want by encoding the RR number into the low bits of the ID. * * We reserve region ID 0 for the kernel and allocate the remaining * IDs for user pmaps. * * Region 0..4 * User virtually mapped * * Region 5 * Kernel virtually mapped * * Region 6 * Kernel physically mapped uncacheable * * Region 7 * Kernel physically mapped cacheable */ /* XXX move to a header. */ extern uint64_t ia64_gateway_page[]; MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures"); #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if !defined(DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif #define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED) #define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY) #define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED) #define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK) #define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT) #define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56) #define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED) #define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED #define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY #define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT #define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED #define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED /* * The VHPT bucket head structure. */ struct ia64_bucket { uint64_t chain; struct mtx mutex; u_int length; }; /* * Statically allocated kernel pmap */ struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ /* * Kernel virtual memory management. */ static int nkpt; struct ia64_lpte **ia64_kptdir; #define KPTE_DIR_INDEX(va) \ ((va >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1)) #define KPTE_PTE_INDEX(va) \ ((va >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1)) #define NKPTEPG (PAGE_SIZE / sizeof(struct ia64_lpte)) vm_offset_t kernel_vm_end; /* Values for ptc.e. XXX values for SKI. */ static uint64_t pmap_ptc_e_base = 0x100000000; static uint64_t pmap_ptc_e_count1 = 3; static uint64_t pmap_ptc_e_count2 = 2; static uint64_t pmap_ptc_e_stride1 = 0x2000; static uint64_t pmap_ptc_e_stride2 = 0x100000000; struct mtx pmap_ptcmutex; /* * Data for the RID allocator */ static int pmap_ridcount; static int pmap_rididx; static int pmap_ridmapsz; static int pmap_ridmax; static uint64_t *pmap_ridmap; struct mtx pmap_ridmutex; /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; int pmap_pagedaemon_waken; /* * Data for allocating PTEs for user processes. */ static uma_zone_t ptezone; /* * Virtual Hash Page Table (VHPT) data. 
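 *
 * Each VHPT entry heads a collision chain protected by a spin mutex
 * (struct ia64_bucket).  Lookup hashes the va with ia64_thash() and
 * walks the chain comparing ia64_ttag() tags; pmap_enter_vhpt() and
 * pmap_remove_vhpt() splice struct ia64_lpte entries in and out of
 * that chain under the bucket lock.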
*/ /* SYSCTL_DECL(_machdep); */ SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, ""); struct ia64_bucket *pmap_vhpt_bucket; int pmap_vhpt_nbuckets; SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD, &pmap_vhpt_nbuckets, 0, ""); uint64_t pmap_vhpt_base[MAXCPU]; int pmap_vhpt_log2size = 0; TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size); SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD, &pmap_vhpt_log2size, 0, ""); static int pmap_vhpt_inserts; SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD, &pmap_vhpt_inserts, 0, ""); static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, pmap_vhpt_population, "I", ""); static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va); static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t locked_pmap); static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot); static pmap_t pmap_install(pmap_t); static void pmap_invalidate_all(pmap_t pmap); static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va, pv_entry_t pv, int freepte); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); vm_offset_t pmap_steal_memory(vm_size_t size) { vm_size_t bank_size; vm_offset_t pa, va; size = round_page(size); bank_size = phys_avail[1] - phys_avail[0]; while (size > bank_size) { int i; for (i = 0; phys_avail[i+2]; i+= 2) { phys_avail[i] = phys_avail[i+2]; phys_avail[i+1] = phys_avail[i+3]; } phys_avail[i] = 0; phys_avail[i+1] = 0; if (!phys_avail[0]) panic("pmap_steal_memory: out of memory"); bank_size = phys_avail[1] - phys_avail[0]; } pa = phys_avail[0]; phys_avail[0] += size; va = IA64_PHYS_TO_RR7(pa); bzero((caddr_t) va, size); return va; } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap() { struct ia64_pal_result res; struct ia64_lpte *pte; vm_offset_t base, limit; size_t size; int i, j, count, ridbits; /* * Query the PAL Code to find the loop parameters for the * ptc.e instruction. */ res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0); if (res.pal_status != 0) panic("Can't configure ptc.e parameters"); pmap_ptc_e_base = res.pal_result[0]; pmap_ptc_e_count1 = res.pal_result[1] >> 32; pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1); pmap_ptc_e_stride1 = res.pal_result[2] >> 32; pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1); if (bootverbose) printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, " "stride1=0x%lx, stride2=0x%lx\n", pmap_ptc_e_base, pmap_ptc_e_count1, pmap_ptc_e_count2, pmap_ptc_e_stride1, pmap_ptc_e_stride2); mtx_init(&pmap_ptcmutex, "Global PTC lock", NULL, MTX_SPIN); /* * Setup RIDs. RIDs 0..7 are reserved for the kernel. * * We currently need at least 19 bits in the RID because PID_MAX * can only be encoded in 17 bits and we need RIDs for 5 regions * per process. With PID_MAX equalling 99999 this means that we * need to be able to encode 499995 (=5*PID_MAX). * The Itanium processor only has 18 bits and the architected * minimum is exactly that. So, we cannot use a PID based scheme * in those cases. Enter pmap_ridmap... * We should avoid the map when running on a processor that has * implemented enough bits. This means that we should pass the * process/thread ID to pmap. This we currently don't do, so we * use the map anyway. 
However, we don't want to allocate a map * that is large enough to cover the range dictated by the number * of bits in the RID, because that may result in a RID map of * 2MB in size for a 24-bit RID. A 64KB map is enough. * The bottomline: we create a 32KB map when the processor only * implements 18 bits (or when we can't figure it out). Otherwise * we create a 64KB map. */ res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { if (bootverbose) printf("Can't read VM Summary - assuming 18 Region ID bits\n"); ridbits = 18; /* guaranteed minimum */ } else { ridbits = (res.pal_result[1] >> 8) & 0xff; if (bootverbose) printf("Processor supports %d Region ID bits\n", ridbits); } if (ridbits > 19) ridbits = 19; pmap_ridmax = (1 << ridbits); pmap_ridmapsz = pmap_ridmax / 64; pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8); pmap_ridmap[0] |= 0xff; pmap_rididx = 0; pmap_ridcount = 8; mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF); /* * Allocate some memory for initial kernel 'page tables'. */ ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE); for (i = 0; i < NKPT; i++) { ia64_kptdir[i] = (void*)pmap_steal_memory(PAGE_SIZE); } nkpt = NKPT; kernel_vm_end = NKPT * PAGE_SIZE * NKPTEPG + VM_MIN_KERNEL_ADDRESS - VM_GATEWAY_SIZE; for (i = 0; phys_avail[i+2]; i+= 2) ; count = i+2; /* * Figure out a useful size for the VHPT, based on the size of * physical memory and try to locate a region which is large * enough to contain the VHPT (which must be a power of two in * size and aligned to a natural boundary). * We silently bump up the VHPT size to the minimum size if the * user has set the tunable too small. Likewise, the VHPT size * is silently capped to the maximum allowed. */ TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size); if (pmap_vhpt_log2size == 0) { pmap_vhpt_log2size = 15; size = 1UL << pmap_vhpt_log2size; while (size < Maxmem * 32) { pmap_vhpt_log2size++; size <<= 1; } } else if (pmap_vhpt_log2size < 15) pmap_vhpt_log2size = 15; if (pmap_vhpt_log2size > 61) pmap_vhpt_log2size = 61; pmap_vhpt_base[0] = 0; base = limit = 0; size = 1UL << pmap_vhpt_log2size; while (pmap_vhpt_base[0] == 0) { if (bootverbose) printf("Trying VHPT size 0x%lx\n", size); for (i = 0; i < count; i += 2) { base = (phys_avail[i] + size - 1) & ~(size - 1); limit = base + MAXCPU * size; if (limit <= phys_avail[i+1]) /* * VHPT can fit in this region */ break; } if (!phys_avail[i]) { /* Can't fit, try next smaller size. */ pmap_vhpt_log2size--; size >>= 1; } else pmap_vhpt_base[0] = IA64_PHYS_TO_RR7(base); } if (pmap_vhpt_log2size < 15) panic("Can't find space for VHPT"); if (bootverbose) printf("Putting VHPT at 0x%lx\n", base); if (base != phys_avail[i]) { /* Split this region. */ if (bootverbose) printf("Splitting [%p-%p]\n", (void *)phys_avail[i], (void *)phys_avail[i+1]); for (j = count; j > i; j -= 2) { phys_avail[j] = phys_avail[j-2]; phys_avail[j+1] = phys_avail[j-2+1]; } phys_avail[i+1] = base; phys_avail[i+2] = limit; } else phys_avail[i] = limit; pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte); pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets * sizeof(struct ia64_bucket)); pte = (struct ia64_lpte *)pmap_vhpt_base[0]; for (i = 0; i < pmap_vhpt_nbuckets; i++) { pte[i].pte = 0; pte[i].itir = 0; pte[i].tag = 1UL << 63; /* Invalid tag */ pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i); /* Stolen memory is zeroed! 
*/ mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL, MTX_SPIN); } for (i = 1; i < MAXCPU; i++) { pmap_vhpt_base[i] = pmap_vhpt_base[i - 1] + size; bcopy((void *)pmap_vhpt_base[i - 1], (void *)pmap_vhpt_base[i], size); } __asm __volatile("mov cr.pta=%0;; srlz.i;;" :: "r" (pmap_vhpt_base[0] + (1<<8) + (pmap_vhpt_log2size<<2) + 1)); virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); for (i = 0; i < 5; i++) kernel_pmap->pm_rid[i] = 0; kernel_pmap->pm_active = 1; TAILQ_INIT(&kernel_pmap->pm_pvlist); PCPU_SET(current_pmap, kernel_pmap); /* * Region 5 is mapped via the vhpt. */ ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1); /* * Region 6 is direct mapped UC and region 7 is direct mapped * WC. The details of this is controlled by the Alt {I,D}TLB * handlers. Here we just make sure that they have the largest * possible page size to minimise TLB usage. */ ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (IA64_ID_PAGE_SHIFT << 2)); ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (IA64_ID_PAGE_SHIFT << 2)); /* * Clear out any random TLB entries left over from booting. */ pmap_invalidate_all(kernel_pmap); map_gateway_page(); } static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS) { int count, error, i; count = 0; for (i = 0; i < pmap_vhpt_nbuckets; i++) count += pmap_vhpt_bucket[i].length; error = SYSCTL_OUT(req, &count, sizeof(count)); return (error); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. 
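 *
 * The limit works out to shpgperproc * maxproc + cnt.v_page_count
 * entries (tunable through vm.pmap.shpgperproc and
 * vm.pmap.pv_entries), with the high-water mark set at 90% of that.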
*/ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE); } void pmap_init2() { } /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ #if 0 static __inline void pmap_invalidate_page_locally(void *arg) { vm_offset_t va = (uintptr_t)arg; struct ia64_lpte *pte; pte = (struct ia64_lpte *)ia64_thash(va); if (pte->tag == ia64_ttag(va)) pte->tag = 1UL << 63; ia64_ptc_l(va, PAGE_SHIFT << 2); } #ifdef SMP static void pmap_invalidate_page_1(void *arg) { void **args = arg; pmap_t oldpmap; critical_enter(); oldpmap = pmap_install(args[0]); pmap_invalidate_page_locally(args[1]); pmap_install(oldpmap); critical_exit(); } #endif static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("invalidating TLB for non-current pmap")); #ifdef SMP if (mp_ncpus > 1) { void *args[2]; args[0] = pmap; args[1] = (void *)va; smp_rendezvous(NULL, pmap_invalidate_page_1, NULL, args); } else #endif pmap_invalidate_page_locally((void *)va); } #endif /* 0 */ static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { struct ia64_lpte *pte; int i, vhpt_ofs; KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("invalidating TLB for non-current pmap")); vhpt_ofs = ia64_thash(va) - pmap_vhpt_base[PCPU_GET(cpuid)]; critical_enter(); for (i = 0; i < MAXCPU; i++) { pte = (struct ia64_lpte *)(pmap_vhpt_base[i] + vhpt_ofs); if (pte->tag == ia64_ttag(va)) pte->tag = 1UL << 63; } critical_exit(); mtx_lock_spin(&pmap_ptcmutex); ia64_ptc_ga(va, PAGE_SHIFT << 2); mtx_unlock_spin(&pmap_ptcmutex); } static void pmap_invalidate_all_1(void *arg) { uint64_t addr; int i, j; critical_enter(); addr = pmap_ptc_e_base; for (i = 0; i < pmap_ptc_e_count1; i++) { for (j = 0; j < pmap_ptc_e_count2; j++) { ia64_ptc_e(addr); addr += pmap_ptc_e_stride2; } addr += pmap_ptc_e_stride1; } critical_exit(); } static void pmap_invalidate_all(pmap_t pmap) { KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("invalidating TLB for non-current pmap")); #ifdef SMP if (mp_ncpus > 1) smp_rendezvous(NULL, pmap_invalidate_all_1, NULL, NULL); else #endif pmap_invalidate_all_1(NULL); } static uint32_t pmap_allocate_rid(void) { uint64_t bit, bits; int rid; mtx_lock(&pmap_ridmutex); if (pmap_ridcount == pmap_ridmax) panic("pmap_allocate_rid: All Region IDs used"); /* Find an index with a free bit. */ while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) { pmap_rididx++; if (pmap_rididx == pmap_ridmapsz) pmap_rididx = 0; } rid = pmap_rididx * 64; /* Find a free bit. */ bit = 1UL; while (bits & bit) { rid++; bit <<= 1; } pmap_ridmap[pmap_rididx] |= bit; pmap_ridcount++; mtx_unlock(&pmap_ridmutex); return rid; } static void pmap_free_rid(uint32_t rid) { uint64_t bit; int idx; idx = rid / 64; bit = ~(1UL << (rid & 63)); mtx_lock(&pmap_ridmutex); pmap_ridmap[idx] &= bit; pmap_ridcount--; mtx_unlock(&pmap_ridmutex); } /*************************************************** * Page table page management routines..... 
***************************************************/ void pmap_pinit0(struct pmap *pmap) { /* kernel_pmap is the same as any other pmap. */ pmap_pinit(pmap); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ void pmap_pinit(struct pmap *pmap) { int i; PMAP_LOCK_INIT(pmap); for (i = 0; i < 5; i++) pmap->pm_rid[i] = pmap_allocate_rid(); pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { int i; for (i = 0; i < 5; i++) if (pmap->pm_rid[i]) pmap_free_rid(pmap->pm_rid[i]); PMAP_LOCK_DESTROY(pmap); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { struct ia64_lpte *ptepage; vm_page_t nkpg; if (kernel_vm_end >= addr) return; critical_enter(); while (kernel_vm_end < addr) { /* We could handle more by increasing the size of kptdir. */ if (nkpt == MAXKPT) panic("pmap_growkernel: out of kernel address space"); nkpg = vm_page_alloc(NULL, nkpt, VM_ALLOC_NOOBJ | VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); ptepage = (struct ia64_lpte *) IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg)); bzero(ptepage, PAGE_SIZE); ia64_kptdir[KPTE_DIR_INDEX(kernel_vm_end)] = ptepage; nkpt++; kernel_vm_end += PAGE_SIZE * NKPTEPG; } critical_exit(); } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. */ static pv_entry_t get_pv_entry(pmap_t locked_pmap) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; struct vpgqueues *vpq; struct ia64_lpte *pte; pmap_t oldpmap, pmap; pv_entry_t allocated_pv, next_pv, pv; vm_offset_t va; vm_page_t m; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); allocated_pv = uma_zalloc(pvzone, M_NOWAIT); if (allocated_pv != NULL) { pv_entry_count++; if (pv_entry_count > pv_entry_high_water) pagedaemon_wakeup(); else return (allocated_pv); } /* * Reclaim pv entries: At first, destroy mappings to inactive * pages. After that, if a pv entry is still needed, destroy * mappings to active pages. */ if (ratecheck(&lastprint, &printinterval)) printf("Approaching the limit on PV entries, " "increase the vm.pmap.shpgperproc tunable.\n"); vpq = &vm_page_queues[PQ_INACTIVE]; retry: TAILQ_FOREACH(m, &vpq->pl, pageq) { if (m->hold_count || m->busy) continue; TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { va = pv->pv_va; pmap = pv->pv_pmap; /* Avoid deadlock and lock recursion. 
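 *
 * Pmaps are locked in ascending address order, so a pmap above
 * locked_pmap can be taken unconditionally; any other pmap is only
 * trylocked and skipped on failure, and locked_pmap itself is
 * already held.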
*/ if (pmap > locked_pmap) PMAP_LOCK(pmap); else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) continue; oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); pmap_remove_pte(pmap, pte, va, pv, 1); pmap_install(oldpmap); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); if (allocated_pv == NULL) allocated_pv = pv; else free_pv_entry(pv); } } if (allocated_pv == NULL) { if (vpq == &vm_page_queues[PQ_INACTIVE]) { vpq = &vm_page_queues[PQ_ACTIVE]; goto retry; } panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable"); } return (allocated_pv); } /* * Conditionally create a pv entry. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { pv_entry_count++; pv->pv_va = va; pv->pv_pmap = pmap; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; return (TRUE); } else return (FALSE); } /* * Add an ia64_lpte to the VHPT. */ static void pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *vhpte; uint64_t pte_pa; /* Can fault, so get it out of the way. */ pte_pa = ia64_tpa((vm_offset_t)pte); vhpte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)vhpte->chain; mtx_lock_spin(&bckt->mutex); pte->chain = bckt->chain; ia64_mf(); bckt->chain = pte_pa; pmap_vhpt_inserts++; bckt->length++; mtx_unlock_spin(&bckt->mutex); } /* * Remove the ia64_lpte matching va from the VHPT. Return zero if it * worked or an appropriate error code otherwise. */ static int pmap_remove_vhpt(vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *pte; struct ia64_lpte *lpte; struct ia64_lpte *vhpte; uint64_t chain, tag; tag = ia64_ttag(va); vhpte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)vhpte->chain; lpte = NULL; mtx_lock_spin(&bckt->mutex); chain = bckt->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); while (chain != 0 && pte->tag != tag) { lpte = pte; chain = pte->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); } if (chain == 0) { mtx_unlock_spin(&bckt->mutex); return (ENOENT); } /* Snip this pv_entry out of the collision chain. */ if (lpte == NULL) bckt->chain = pte->chain; else lpte->chain = pte->chain; ia64_mf(); bckt->length--; mtx_unlock_spin(&bckt->mutex); return (0); } /* * Find the ia64_lpte for the given va, if any. */ static struct ia64_lpte * pmap_find_vhpt(vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *pte; uint64_t chain, tag; tag = ia64_ttag(va); pte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)pte->chain; mtx_lock_spin(&bckt->mutex); chain = bckt->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); while (chain != 0 && pte->tag != tag) { chain = pte->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); } mtx_unlock_spin(&bckt->mutex); return ((chain != 0) ? pte : NULL); } /* * Remove an entry from the list of managed mappings. 
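 *
 * When no pv is supplied, the shorter candidate list is searched:
 * the page's pv list if it has fewer entries than the pmap has
 * resident pages, otherwise the pmap's own pv list keyed by va.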
*/ static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv) { if (!pv) { if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } } if (pv) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_FIRST(&m->md.pv_list) == NULL) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); return 0; } else { return ENOENT; } } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(pmap); pv->pv_pmap = pmap; pv->pv_va = va; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { struct ia64_lpte *pte; pmap_t oldpmap; vm_paddr_t pa; pa = 0; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); if (pte != NULL && pmap_present(pte)) pa = pmap_ppn(pte); pmap_install(oldpmap); PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { struct ia64_lpte *pte; pmap_t oldpmap; vm_page_t m; m = NULL; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); if (pte != NULL && pmap_present(pte) && (pmap_prot(pte) & prot) == prot) { m = PHYS_TO_VM_PAGE(pmap_ppn(pte)); vm_page_hold(m); } vm_page_unlock_queues(); pmap_install(oldpmap); PMAP_UNLOCK(pmap); return (m); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Find the kernel lpte for mapping the given virtual address, which * must be in the part of region 5 which we can cover with our kernel * 'page tables'. */ static struct ia64_lpte * pmap_find_kpte(vm_offset_t va) { KASSERT((va >> 61) == 5, ("kernel mapping 0x%lx not in region 5", va)); KASSERT(IA64_RR_MASK(va) < (nkpt * PAGE_SIZE * NKPTEPG), ("kernel mapping 0x%lx out of range", va)); return (&ia64_kptdir[KPTE_DIR_INDEX(va)][KPTE_PTE_INDEX(va)]); } /* * Find a pte suitable for mapping a user-space address. If one exists * in the VHPT, that one will be returned, otherwise a new pte is * allocated. */ static struct ia64_lpte * pmap_find_pte(vm_offset_t va) { struct ia64_lpte *pte; if (va >= VM_MAXUSER_ADDRESS) return pmap_find_kpte(va); pte = pmap_find_vhpt(va); if (pte == NULL) { pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO); pte->tag = 1UL << 63; } return (pte); } /* * Free a pte which is now unused. This simply returns it to the zone * allocator if it is a user mapping. For kernel mappings, clear the * valid bit to make it clear that the mapping is not currently used. 
*/ static void pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va) { if (va < VM_MAXUSER_ADDRESS) uma_zfree(ptezone, pte); else pmap_clear_present(pte); } static PMAP_INLINE void pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot) { static int prot2ar[4] = { PTE_AR_R, /* VM_PROT_NONE */ PTE_AR_RW, /* VM_PROT_WRITE */ PTE_AR_RX, /* VM_PROT_EXECUTE */ PTE_AR_RWX /* VM_PROT_WRITE|VM_PROT_EXECUTE */ }; pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK); pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56; pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap) ? PTE_PL_KERN : PTE_PL_USER; pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1]; } /* * Set a pte to contain a valid mapping and enter it in the VHPT. If * the pte was orginally valid, then its assumed to already be in the * VHPT. * This functions does not set the protection bits. It's expected * that those have been set correctly prior to calling this function. */ static void pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa, boolean_t wired, boolean_t managed) { pte->pte &= PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK; pte->pte |= PTE_PRESENT | PTE_MA_WB; pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED); pte->pte |= (wired) ? PTE_WIRED : 0; pte->pte |= pa & PTE_PPN_MASK; pte->itir = PAGE_SHIFT << 2; pte->tag = ia64_ttag(va); } /* * Remove the (possibly managed) mapping represented by pte from the * given pmap. */ static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va, pv_entry_t pv, int freepte) { int error; vm_page_t m; KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("removing pte for non-current pmap")); /* * First remove from the VHPT. */ error = pmap_remove_vhpt(va); if (error) return (error); pmap_invalidate_page(pmap, va); if (pmap_wired(pte)) pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (pmap_managed(pte)) { m = PHYS_TO_VM_PAGE(pmap_ppn(pte)); if (pmap_dirty(pte)) vm_page_dirty(m); if (pmap_accessed(pte)) vm_page_flag_set(m, PG_REFERENCED); error = pmap_remove_entry(pmap, m, va, pv); } if (freepte) pmap_free_pte(pte, va); return (error); } /* * Extract the physical page address associated with a kernel * virtual address. */ vm_paddr_t pmap_kextract(vm_offset_t va) { struct ia64_lpte *pte; vm_offset_t gwpage; KASSERT(va >= IA64_RR_BASE(5), ("Must be kernel VA")); /* Regions 6 and 7 are direct mapped. */ if (va >= IA64_RR_BASE(6)) return (IA64_RR_MASK(va)); /* EPC gateway page? */ gwpage = (vm_offset_t)ia64_get_k5(); if (va >= gwpage && va < gwpage + VM_GATEWAY_SIZE) return (IA64_RR_MASK((vm_offset_t)ia64_gateway_page)); /* Bail out if the virtual address is beyond our limits. */ if (IA64_RR_MASK(va) >= nkpt * PAGE_SIZE * NKPTEPG) return (0); pte = pmap_find_kpte(va); if (!pmap_present(pte)) return (0); return (pmap_ppn(pte) | (va & PAGE_MASK)); } /* * Add a list of wired pages to the kva this routine is only used for * temporary kernel mappings that do not need to have page modification * or references recorded. Note that old mappings are simply written * over. The page is effectively wired, but it's customary to not have * the PTE reflect that, nor update statistics. 
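 *
 * Illustrative use only (hypothetical caller, not part of this
 * file): code that needs a temporary contiguous kernel view of a
 * page array could do
 *
 *	pmap_qenter(kva, pages, npages);
 *	... use (void *)kva ...
 *	pmap_qremove(kva, npages);
 *
 * with "kva" previously obtained from the kernel VA allocator.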
*/ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { struct ia64_lpte *pte; int i; for (i = 0; i < count; i++) { pte = pmap_find_kpte(va); if (pmap_present(pte)) pmap_invalidate_page(kernel_pmap, va); else pmap_enter_vhpt(pte, va); pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL); pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE); va += PAGE_SIZE; } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t va, int count) { struct ia64_lpte *pte; int i; for (i = 0; i < count; i++) { pte = pmap_find_kpte(va); if (pmap_present(pte)) { pmap_remove_vhpt(va); pmap_invalidate_page(kernel_pmap, va); pmap_clear_present(pte); } va += PAGE_SIZE; } } /* * Add a wired page to the kva. As for pmap_qenter(), it's customary * to not have the PTE reflect that, nor update statistics. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { struct ia64_lpte *pte; pte = pmap_find_kpte(va); if (pmap_present(pte)) pmap_invalidate_page(kernel_pmap, va); else pmap_enter_vhpt(pte, va); pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL); pmap_set_pte(pte, va, pa, FALSE, FALSE); } /* * Remove a page from the kva */ void pmap_kremove(vm_offset_t va) { struct ia64_lpte *pte; pte = pmap_find_kpte(va); if (pmap_present(pte)) { pmap_remove_vhpt(va); pmap_invalidate_page(kernel_pmap, va); pmap_clear_present(pte); } } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { return IA64_PHYS_TO_RR7(start); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va) { struct ia64_lpte *pte; KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("removing page for non-current pmap")); pte = pmap_find_vhpt(va); if (pte != NULL) pmap_remove_pte(pmap, pte, va, 0, 1); return; } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_t oldpmap; vm_offset_t va; pv_entry_t npv, pv; struct ia64_lpte *pte; if (pmap->pm_stats.resident_count == 0) return; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. */ if (sva + PAGE_SIZE == eva) { pmap_remove_page(pmap, sva); goto out; } if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) { TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, npv) { va = pv->pv_va; if (va >= sva && va < eva) { pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); pmap_remove_pte(pmap, pte, va, pv, 1); } } } else { for (va = sva; va < eva; va = va += PAGE_SIZE) { pte = pmap_find_vhpt(va); if (pte != NULL) pmap_remove_pte(pmap, pte, va, 0, 1); } } out: vm_page_unlock_queues(); pmap_install(oldpmap); PMAP_UNLOCK(pmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. 
* Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { pmap_t oldpmap; pv_entry_t pv; #if defined(DIAGNOSTIC) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ if (m->flags & PG_FICTITIOUS) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m)); } #endif mtx_assert(&vm_page_queue_mtx, MA_OWNED); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { struct ia64_lpte *pte; pmap_t pmap = pv->pv_pmap; vm_offset_t va = pv->pv_va; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m)) panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); pmap_remove_pte(pmap, pte, va, pv, 1); pmap_install(oldpmap); PMAP_UNLOCK(pmap); } vm_page_flag_clear(m, PG_WRITEABLE); } /* * Set the physical protection on the * specified range of this map as requested. */ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pmap_t oldpmap; struct ia64_lpte *pte; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == (VM_PROT_WRITE|VM_PROT_EXECUTE)) return; if ((sva & PAGE_MASK) || (eva & PAGE_MASK)) panic("pmap_protect: unaligned addresses"); vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); while (sva < eva) { /* * If page is invalid, skip this page */ pte = pmap_find_vhpt(sva); if (pte == NULL) { sva += PAGE_SIZE; continue; } if (pmap_prot(pte) != prot) { if (pmap_managed(pte)) { vm_offset_t pa = pmap_ppn(pte); vm_page_t m = PHYS_TO_VM_PAGE(pa); if (pmap_dirty(pte)) { vm_page_dirty(m); pmap_clear_dirty(pte); } if (pmap_accessed(pte)) { vm_page_flag_set(m, PG_REFERENCED); pmap_clear_accessed(pte); } } pmap_pte_prot(pmap, pte, prot); pmap_invalidate_page(pmap, sva); } sva += PAGE_SIZE; } vm_page_unlock_queues(); pmap_install(oldpmap); PMAP_UNLOCK(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { pmap_t oldpmap; vm_offset_t pa; vm_offset_t opa; struct ia64_lpte origpte; struct ia64_lpte *pte; boolean_t managed; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); va &= ~PAGE_MASK; #ifdef DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); #endif /* * Find (or create) a pte for the given mapping. */ while ((pte = pmap_find_pte(va)) == NULL) { pmap_install(oldpmap); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); VM_WAIT; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); } origpte = *pte; if (!pmap_present(pte)) { opa = ~0UL; pmap_enter_vhpt(pte, va); } else opa = pmap_ppn(pte); managed = FALSE; pa = VM_PAGE_TO_PHYS(m); /* * Mapping has not changed, must be protection or wiring change. */ if (opa == pa) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. 
Hence, if a user page is wired, * the PT page will be also. */ if (wired && !pmap_wired(&origpte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_wired(&origpte)) pmap->pm_stats.wired_count--; managed = (pmap_managed(&origpte)) ? TRUE : FALSE; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (managed && pmap_dirty(&origpte)) vm_page_dirty(m); pmap_invalidate_page(pmap, va); goto validate; } /* * Mapping has changed, invalidate old range and fall * through to handle validating new mapping. */ if (opa != ~0UL) { pmap_remove_pte(pmap, pte, va, 0, 0); pmap_enter_vhpt(pte, va); } /* * Enter on the PV list if part of our managed memory. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_insert_entry(pmap, va, m); managed = TRUE; } /* * Increment counters */ pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. This * adds the pte to the VHPT if necessary. */ pmap_pte_prot(pmap, pte, prot); pmap_set_pte(pte, va, pa, wired, managed); vm_page_unlock_queues(); pmap_install(oldpmap); PMAP_UNLOCK(pmap); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { pmap_t oldpmap; vm_page_t m; vm_pindex_t diff, psize; VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); psize = atop(end - start); m = m_start; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot); m = TAILQ_NEXT(m, listq); } pmap_install(oldpmap); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ -vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { pmap_t oldpmap; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pmap_enter_quick_locked(pmap, va, m, prot); pmap_install(oldpmap); PMAP_UNLOCK(pmap); - return (NULL); } static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct ia64_lpte *pte; boolean_t managed; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((pte = pmap_find_pte(va)) == NULL) return; if (!pmap_present(pte)) { /* Enter on the PV list if the page is managed. 
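 * If a pv entry cannot be allocated without sleeping, the pte is
 * freed again and the mapping is quietly skipped; this path is only
 * an optimization, so a later access simply takes a normal fault
 * and goes through pmap_enter().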
*/ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { if (!pmap_try_insert_pv_entry(pmap, va, m)) { pmap_free_pte(pte, va); return; } managed = TRUE; } else managed = FALSE; /* Increment counters. */ pmap->pm_stats.resident_count++; /* Initialise with R/O protection and enter into VHPT. */ pmap_enter_vhpt(pte, va); pmap_pte_prot(pmap, pte, prot & (VM_PROT_READ | VM_PROT_EXECUTE)); pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed); } } /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { pmap_t oldpmap; struct ia64_lpte *pte; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); if (wired && !pmap_wired(pte)) { pmap->pm_stats.wired_count++; pmap_set_wired(pte); } else if (!wired && pmap_wired(pte)) { pmap->pm_stats.wired_count--; pmap_clear_wired(pte); } pmap_install(oldpmap); PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. This is for the vm_idlezero process. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(mdst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. 
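 *
 * The scan below counts the pv entries it examines and gives up
 * after 16, so a heavily shared page may report FALSE for a pmap
 * whose entry sits deeper in the list; that is acceptable for the
 * page-aging heuristics this feeds.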
*/ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops = 0; if (m->flags & PG_FICTITIOUS) return FALSE; /* * Not found, check current mappings returning immediately if found. */ mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { return TRUE; } loops++; if (loops >= 16) break; } return (FALSE); } /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. */ void pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_t oldpmap; pv_entry_t pv, npv; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { struct ia64_lpte *pte; npv = TAILQ_NEXT(pv, pv_plist); if (pv->pv_va >= eva || pv->pv_va < sva) continue; pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (!pmap_wired(pte)) pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1); } pmap_install(oldpmap); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { struct ia64_lpte *pte; pmap_t oldpmap, pmap; pv_entry_t pv; if ((prot & VM_PROT_WRITE) != 0) return; if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { if ((m->flags & PG_WRITEABLE) == 0) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap = pv->pv_pmap; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); pmap_pte_prot(pmap, pte, prot); pmap_invalidate_page(pmap, pv->pv_va); pmap_install(oldpmap); PMAP_UNLOCK(pmap); } vm_page_flag_clear(m, PG_WRITEABLE); } else { pmap_remove_all(m); } } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; int count = 0; if (m->flags & PG_FICTITIOUS) return 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_install(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_accessed(pte)) { count++; pmap_clear_accessed(pte); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } return count; } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; boolean_t rv; rv = FALSE; if (m->flags & PG_FICTITIOUS) return (rv); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_install(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); pmap_install(oldpmap); KASSERT(pte != NULL, ("pte")); rv = pmap_dirty(pte) ? 
TRUE : FALSE; PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { struct ia64_lpte *pte; pte = pmap_find_vhpt(addr); if (pte != NULL && pmap_present(pte)) return (FALSE); return (TRUE); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; if (m->flags & PG_FICTITIOUS) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_install(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_dirty(pte)) { pmap_clear_dirty(pte); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; if (m->flags & PG_FICTITIOUS) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_install(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_accessed(pte)) { pmap_clear_accessed(pte); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { return (void*) IA64_PHYS_TO_RR6(pa); } /* * 'Unmap' a range mapped by pmap_mapdev(). */ void pmap_unmapdev(vm_offset_t va, vm_size_t size) { return; } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr) { pmap_t oldpmap; struct ia64_lpte *pte, tpte; int val = 0; PMAP_LOCK(pmap); oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(addr); if (pte != NULL) { tpte = *pte; pte = &tpte; } pmap_install(oldpmap); PMAP_UNLOCK(pmap); if (pte == NULL) return 0; if (pmap_present(pte)) { vm_page_t m; vm_offset_t pa; val = MINCORE_INCORE; if (!pmap_managed(pte)) return val; pa = pmap_ppn(pte); m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if (pmap_dirty(pte)) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; else { /* * Modified by someone */ vm_page_lock_queues(); if (pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; vm_page_unlock_queues(); } /* * Referenced by us */ if (pmap_accessed(pte)) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; else { /* * Referenced by someone */ vm_page_lock_queues(); if (pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } vm_page_unlock_queues(); } } return val; } void pmap_activate(struct thread *td) { pmap_install(vmspace_pmap(td->td_proc->p_vmspace)); } pmap_t pmap_switch(pmap_t pm) { pmap_t prevpm; int i; mtx_assert(&sched_lock, MA_OWNED); prevpm = PCPU_GET(current_pmap); if (prevpm == pm) return (prevpm); if (prevpm != NULL) atomic_clear_32(&prevpm->pm_active, PCPU_GET(cpumask)); if (pm == NULL) { for (i = 0; i < 5; i++) { ia64_set_rr(IA64_RR_BASE(i), (i << 8)|(PAGE_SHIFT << 2)|1); } } else { for (i = 0; i < 5; i++) { ia64_set_rr(IA64_RR_BASE(i), (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1); } atomic_set_32(&pm->pm_active, PCPU_GET(cpumask)); } PCPU_SET(current_pmap, pm); __asm __volatile("srlz.d"); return (prevpm); } static 
pmap_t pmap_install(pmap_t pm) { pmap_t prevpm; mtx_lock_spin(&sched_lock); prevpm = pmap_switch(pm); mtx_unlock_spin(&sched_lock); return (prevpm); } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { return addr; } #include "opt_ddb.h" #ifdef DDB #include static const char* psnames[] = { "1B", "2B", "4B", "8B", "16B", "32B", "64B", "128B", "256B", "512B", "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", "1G", "2G" }; static void print_trs(int type) { struct ia64_pal_result res; int i, maxtr; struct { pt_entry_t pte; uint64_t itir; uint64_t ifa; struct ia64_rr rr; } buf; static const char *manames[] = { "WB", "bad", "bad", "bad", "UC", "UCE", "WC", "NaT", }; res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { db_printf("Can't get VM summary\n"); return; } if (type == 0) maxtr = (res.pal_result[0] >> 40) & 0xff; else maxtr = (res.pal_result[0] >> 32) & 0xff; db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n"); for (i = 0; i <= maxtr; i++) { bzero(&buf, sizeof(buf)); res = ia64_call_pal_stacked_physical (PAL_VM_TR_READ, i, type, ia64_tpa((uint64_t) &buf)); if (!(res.pal_result[0] & 1)) buf.pte &= ~PTE_AR_MASK; if (!(res.pal_result[0] & 2)) buf.pte &= ~PTE_PL_MASK; if (!(res.pal_result[0] & 4)) pmap_clear_dirty(&buf); if (!(res.pal_result[0] & 8)) buf.pte &= ~PTE_MA_MASK; db_printf("%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s " "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid, buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12, psnames[(buf.itir & ITIR_PS_MASK) >> 2], (buf.pte & PTE_ED) ? 1 : 0, (int)(buf.pte & PTE_AR_MASK) >> 9, (int)(buf.pte & PTE_PL_MASK) >> 7, (pmap_dirty(&buf)) ? 1 : 0, (pmap_accessed(&buf)) ? 1 : 0, manames[(buf.pte & PTE_MA_MASK) >> 2], (pmap_present(&buf)) ? 1 : 0, (int)((buf.itir & ITIR_KEY_MASK) >> 8)); } } DB_COMMAND(itr, db_itr) { print_trs(0); } DB_COMMAND(dtr, db_dtr) { print_trs(1); } DB_COMMAND(rr, db_rr) { int i; uint64_t t; struct ia64_rr rr; printf("RR RID PgSz VE\n"); for (i = 0; i < 8; i++) { __asm __volatile ("mov %0=rr[%1]" : "=r"(t) : "r"(IA64_RR_BASE(i))); *(uint64_t *) &rr = t; printf("%d %06x %4s %d\n", i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve); } } DB_COMMAND(thash, db_thash) { if (!have_addr) return; db_printf("%p\n", (void *) ia64_thash(addr)); } DB_COMMAND(ttag, db_ttag) { if (!have_addr) return; db_printf("0x%lx\n", ia64_ttag(addr)); } DB_COMMAND(kpte, db_kpte) { struct ia64_lpte *pte; if (!have_addr) { db_printf("usage: kpte \n"); return; } if (addr < VM_MIN_KERNEL_ADDRESS) { db_printf("kpte: error: invalid \n"); return; } pte = &ia64_kptdir[KPTE_DIR_INDEX(addr)][KPTE_PTE_INDEX(addr)]; db_printf("kpte at %p:\n", pte); db_printf(" pte =%016lx\n", pte->pte); db_printf(" itir =%016lx\n", pte->itir); db_printf(" tag =%016lx\n", pte->tag); db_printf(" chain=%016lx\n", pte->chain); } #endif Index: stable/6/sys/powerpc/powerpc/pmap.c =================================================================== --- stable/6/sys/powerpc/powerpc/pmap.c (revision 173366) +++ stable/6/sys/powerpc/powerpc/pmap.c (revision 173367) @@ -1,2523 +1,2521 @@ /*- * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Matt Thomas of Allegro Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ */ /*- * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. 
*/ #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #define TODO panic("%s: not implemented", __func__); #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) #define TLBSYNC() __asm __volatile("tlbsync"); #define SYNC() __asm __volatile("sync"); #define EIEIO() __asm __volatile("eieio"); #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) #define VSID_TO_SR(vsid) ((vsid) & 0xf) #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) #define PVO_PTEGIDX_MASK 0x007 /* which PTEG slot */ #define PVO_PTEGIDX_VALID 0x008 /* slot is valid */ #define PVO_WIRED 0x010 /* PVO entry is wired */ #define PVO_MANAGED 0x020 /* PVO entry is managed */ #define PVO_EXECUTABLE 0x040 /* PVO entry is executable */ #define PVO_BOOTSTRAP 0x080 /* PVO entry allocated during bootstrap */ #define PVO_FAKE 0x100 /* fictitious phys page */ #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) #define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) #define PVO_PTEGIDX_CLR(pvo) \ ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) #define PVO_PTEGIDX_SET(pvo, i) \ ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) #define PMAP_PVO_CHECK(pvo) struct ofw_map { vm_offset_t om_va; vm_size_t om_len; vm_offset_t om_pa; u_int om_mode; }; int pmap_bootstrapped = 0; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; int pmap_pagedaemon_waken; /* * Map of physical memory regions. */ vm_offset_t phys_avail[128]; u_int phys_avail_count; static struct mem_region *regions; static struct mem_region *pregions; int regions_sz, pregions_sz; static struct ofw_map *translations; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * Kernel pmap. */ struct pmap kernel_pmap_store; extern struct pmap ofw_pmap; /* * Lock for the pteg and pvo tables. */ struct mtx pmap_table_mutex; /* * PTEG data. */ static struct pteg *pmap_pteg_table; u_int pmap_pteg_count; u_int pmap_pteg_mask; /* * PVO data. */ struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ uma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ uma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ #define BPVO_POOL_SIZE 32768 static struct pvo_entry *pmap_bpvo_pool; static int pmap_bpvo_pool_index = 0; #define VSID_NBPW (sizeof(u_int32_t) * 8) static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; static boolean_t pmap_initialized = FALSE; /* * Statistics. 
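 *
 * These counters are exported read-only under the machdep sysctl
 * tree by the SYSCTL_INT() declarations below; for example (assumed
 * usage, not part of this file):
 *
 *	# sysctl machdep.pmap_pte_overflow machdep.pmap_pte_spills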
*/ u_int pmap_pte_valid = 0; u_int pmap_pte_overflow = 0; u_int pmap_pte_replacements = 0; u_int pmap_pvo_entries = 0; u_int pmap_pvo_enter_calls = 0; u_int pmap_pvo_remove_calls = 0; u_int pmap_pte_spills = 0; SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, &pmap_pte_overflow, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, &pmap_pte_replacements, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, &pmap_pvo_enter_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, &pmap_pvo_remove_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, &pmap_pte_spills, 0, ""); struct pvo_entry *pmap_pvo_zeropage; vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; u_int pmap_rkva_count = 4; /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); /* * PTE calls. */ static int pmap_pte_insert(u_int, struct pte *); /* * PVO calls. */ static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, vm_offset_t, vm_offset_t, u_int, int); static void pmap_pvo_remove(struct pvo_entry *, int); static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); /* * Utility routines. */ static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); static struct pvo_entry *pmap_rkva_alloc(void); static void pmap_pa_map(struct pvo_entry *, vm_offset_t, struct pte *, int *); static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); static void pmap_syncicache(vm_offset_t, vm_size_t); static boolean_t pmap_query_bit(vm_page_t, int); static u_int pmap_clear_bit(vm_page_t, int, int *); static void tlbia(void); static __inline int va_to_sr(u_int *sr, vm_offset_t va) { return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); } static __inline u_int va_to_pteg(u_int sr, vm_offset_t addr) { u_int hash; hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); return (hash & pmap_pteg_mask); } static __inline struct pvo_head * pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pa); if (pg_p != NULL) *pg_p = pg; if (pg == NULL) return (&pmap_pvo_unmanaged); return (&pg->md.mdpg_pvoh); } static __inline struct pvo_head * vm_page_to_pvoh(vm_page_t m) { return (&m->md.mdpg_pvoh); } static __inline void pmap_attr_clear(vm_page_t m, int ptebit) { m->md.mdpg_attrs &= ~ptebit; } static __inline int pmap_attr_fetch(vm_page_t m) { return (m->md.mdpg_attrs); } static __inline void pmap_attr_save(vm_page_t m, int ptebit) { m->md.mdpg_attrs |= ptebit; } static __inline int pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) { if (pt->pte_hi == pvo_pt->pte_hi) return (1); return (0); } static __inline int pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) { return (pt->pte_hi & ~PTE_VALID) == (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | ((va >> ADDR_API_SHFT) & PTE_API) | which); } static __inline void pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) { /* * Construct a PTE. Default to IMB initially. Valid bit only gets * set when the real pte is set in memory. * * Note: Don't set the valid bit for correct operation of tlb update. 
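 *
 * pte_hi is assembled from the VSID taken from the segment register
 * and the abbreviated page index of va; pte_lo is taken as given by
 * the caller and is expected to already contain the physical page
 * number along with the WIMG and PP bits.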
*/ pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); pt->pte_lo = pte_lo; } static __inline void pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); } static __inline void pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) { /* * As shown in Section 7.6.3.2.3 */ pt->pte_lo &= ~ptebit; TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); } static __inline void pmap_pte_set(struct pte *pt, struct pte *pvo_pt) { pvo_pt->pte_hi |= PTE_VALID; /* * Update the PTE as defined in section 7.6.3.1. * Note that the REF/CHG bits are from pvo_pt and thus should havce * been saved so this routine can restore them (if desired). */ pt->pte_lo = pvo_pt->pte_lo; EIEIO(); pt->pte_hi = pvo_pt->pte_hi; SYNC(); pmap_pte_valid++; } static __inline void pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { pvo_pt->pte_hi &= ~PTE_VALID; /* * Force the reg & chg bits back into the PTEs. */ SYNC(); /* * Invalidate the pte. */ pt->pte_hi &= ~PTE_VALID; SYNC(); TLBIE(va); EIEIO(); TLBSYNC(); SYNC(); /* * Save the reg & chg bits. */ pmap_pte_synch(pt, pvo_pt); pmap_pte_valid--; } static __inline void pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) { /* * Invalidate the PTE */ pmap_pte_unset(pt, pvo_pt, va); pmap_pte_set(pt, pvo_pt); } /* * Quick sort callout for comparing memory regions. */ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *regiona; const struct mem_region *regionb; regiona = a; regionb = b; if (regiona->mr_start < regionb->mr_start) return (-1); else if (regiona->mr_start > regionb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *mapa; const struct ofw_map *mapb; mapa = a; mapb = b; if (mapa->om_pa < mapb->om_pa) return (-1); else if (mapa->om_pa > mapb->om_pa) return (1); else return (0); } void pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) { ihandle_t mmui; phandle_t chosen, mmu; int sz; int i, j; int ofw_mappings; vm_size_t size, physsz, hwphyssz; vm_offset_t pa, va, off; u_int batl, batu; /* * Set up BAT0 to map the lowest 256 MB area */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); /* * Map PCI memory space. */ battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); /* * Map obio devices. */ battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); /* * Use an IBAT and a DBAT to map the bottom segment of memory * where we are. 
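 * Both an instruction BAT and a data BAT are needed so that
 * instruction fetches as well as loads and stores keep working
 * while no page table exists yet; the .balign below presumably
 * keeps the whole mtibat/mtdbat/isync sequence together while
 * BAT 0 is being rewritten underneath it.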
*/ batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x00000000, BAT_M, BAT_PP_RW); __asm (".balign 32; \n" "mtibatu 0,%0; mtibatl 0,%1; isync; \n" "mtdbatu 0,%0; mtdbatl 0,%1; isync" :: "r"(batu), "r"(batl)); #if 0 /* map frame buffer */ batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" :: "r"(batu), "r"(batl)); #endif #if 1 /* map pci space */ batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" :: "r"(batu), "r"(batl)); #endif /* * Set the start and end of kva. */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); for (i = 0; i < pregions_sz; i++) { vm_offset_t pa; vm_offset_t end; CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", pregions[i].mr_start, pregions[i].mr_start + pregions[i].mr_size, pregions[i].mr_size); /* * Install entries into the BAT table to allow all * of physmem to be convered by on-demand BAT entries. * The loop will sometimes set the same battable element * twice, but that's fine since they won't be used for * a while yet. */ pa = pregions[i].mr_start & 0xf0000000; end = pregions[i].mr_start + pregions[i].mr_size; do { u_int n = pa >> ADDR_SR_SHFT; battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); pa += SEGMENT_LENGTH; } while (pa < end); } if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) panic("pmap_bootstrap: phys_avail too small"); qsort(regions, regions_sz, sizeof(*regions), mr_cmp); phys_avail_count = 0; physsz = 0; hwphyssz = 0; TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); for (i = 0, j = 0; i < regions_sz; i++, j += 2) { CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, regions[i].mr_start + regions[i].mr_size, regions[i].mr_size); if (hwphyssz != 0 && (physsz + regions[i].mr_size) >= hwphyssz) { if (physsz < hwphyssz) { phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + hwphyssz - physsz; physsz = hwphyssz; phys_avail_count++; } break; } phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; phys_avail_count++; physsz += regions[i].mr_size; } physmem = btoc(physsz); /* * Allocate PTEG table. */ #ifdef PTEGCOUNT pmap_pteg_count = PTEGCOUNT; #else pmap_pteg_count = 0x1000; while (pmap_pteg_count < physmem) pmap_pteg_count <<= 1; pmap_pteg_count >>= 1; #endif /* PTEGCOUNT */ size = pmap_pteg_count * sizeof(struct pteg); CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, size); pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); pmap_pteg_mask = pmap_pteg_count - 1; /* * Allocate pv/overflow lists. */ size = sizeof(struct pvo_head) * pmap_pteg_count; pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, PAGE_SIZE); CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); for (i = 0; i < pmap_pteg_count; i++) LIST_INIT(&pmap_pvo_table[i]); /* * Initialize the lock that synchronizes access to the pteg and pvo * tables. */ mtx_init(&pmap_table_mutex, "pmap table", NULL, MTX_DEF); /* * Allocate the message buffer. 
*/ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); /* * Initialise the unmanaged pvo pool. */ pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); pmap_bpvo_pool_index = 0; /* * Make sure kernel vsid is allocated as well as VSID 0. */ pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); pmap_vsid_bitmap[0] |= 1; /* * Set up the Open Firmware pmap and add it's mappings. */ pmap_pinit(&ofw_pmap); ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; if ((chosen = OF_finddevice("/chosen")) == -1) panic("pmap_bootstrap: can't find /chosen"); OF_getprop(chosen, "mmu", &mmui, 4); if ((mmu = OF_instance_to_package(mmui)) == -1) panic("pmap_bootstrap: can't get mmu package"); if ((sz = OF_getproplen(mmu, "translations")) == -1) panic("pmap_bootstrap: can't get ofw translation count"); translations = NULL; for (i = 0; phys_avail[i] != 0; i += 2) { if (phys_avail[i + 1] >= sz) { translations = (struct ofw_map *)phys_avail[i]; break; } } if (translations == NULL) panic("pmap_bootstrap: no space to copy translations"); bzero(translations, sz); if (OF_getprop(mmu, "translations", translations, sz) == -1) panic("pmap_bootstrap: can't get ofw translations"); CTR0(KTR_PMAP, "pmap_bootstrap: translations"); sz /= sizeof(*translations); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0, ofw_mappings = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", translations[i].om_pa, translations[i].om_va, translations[i].om_len); /* * If the mapping is 1:1, let the RAM and device on-demand * BAT tables take care of the translation. */ if (translations[i].om_va == translations[i].om_pa) continue; /* Enter the pages */ for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { struct vm_page m; m.phys_addr = translations[i].om_pa + off; PMAP_LOCK(&ofw_pmap); pmap_enter_locked(&ofw_pmap, translations[i].om_va + off, &m, VM_PROT_ALL, 1); PMAP_UNLOCK(&ofw_pmap); ofw_mappings++; } } #ifdef SMP TLBSYNC(); #endif /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); for (i = 0; i < 16; i++) { kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; } kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; kernel_pmap->pm_active = ~0; /* * Allocate a kernel stack with a guard page for thread0 and map it * into the kernel page map. */ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); kstack0_phys = pa; kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, kstack0); virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; pmap_kenter(va, pa); TLBIE(va); } /* * Calculate the last available physical address. */ for (i = 0; phys_avail[i + 2] != 0; i += 2) ; Maxmem = powerpc_btop(phys_avail[i + 1]); /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize hardware. 
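 *
 * All 16 segment registers are loaded, the two kernel segments are
 * re-entered, and SDR1 is pointed at the PTEG table; the low bits
 * of the SDR1 value (pmap_pteg_mask >> 10) encode the hash table
 * mask, which is one reason the PTEG count computed earlier must be
 * a power of two.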
*/ for (i = 0; i < 16; i++) { mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); } __asm __volatile ("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); __asm __volatile ("mtsr %0,%1" :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); __asm __volatile ("sync; mtsdr1 %0; isync" :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); tlbia(); pmap_bootstrapped++; } /* * Activate a user pmap. The pmap must be activated before it's address * space can be accessed in any way. */ void pmap_activate(struct thread *td) { pmap_t pm, pmr; /* * Load all the data we need up front to encourage the compiler to * not issue any loads while we have interrupts disabled below. */ pm = &td->td_proc->p_vmspace->vm_pmap; if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) pmr = pm; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(curpmap, pmr); } void pmap_deactivate(struct thread *td) { pmap_t pm; pm = &td->td_proc->p_vmspace->vm_pmap; pm->pm_active &= ~(PCPU_GET(cpumask)); PCPU_SET(curpmap, NULL); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); } void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct pvo_entry *pvo; PMAP_LOCK(pm); pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo != NULL) { if (wired) { if ((pvo->pvo_vaddr & PVO_WIRED) == 0) pm->pm_stats.wired_count++; pvo->pvo_vaddr |= PVO_WIRED; } else { if ((pvo->pvo_vaddr & PVO_WIRED) != 0) pm->pm_stats.wired_count--; pvo->pvo_vaddr &= ~PVO_WIRED; } } PMAP_UNLOCK(pm); } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { /* * This is not needed as it's mainly an optimisation. * It may want to be implemented later though. */ } void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); kcopy((void *)src, (void *)dst, PAGE_SIZE); } /* * Zero a page of physical memory by temporarily mapping it into the tlb. */ void pmap_zero_page(vm_page_t m) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va, PAGE_SIZE); if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; if (pa < SEGMENT_LENGTH) { va = (caddr_t) pa; } else if (pmap_initialized) { if (pmap_pvo_zeropage == NULL) pmap_pvo_zeropage = pmap_rkva_alloc(); pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); } else { panic("pmap_zero_page: can't zero pa %#x", pa); } bzero(va + off, size); if (pa >= SEGMENT_LENGTH) pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); } void pmap_zero_page_idle(vm_page_t m) { /* XXX this is called outside of Giant, is pmap_zero_page safe? */ /* XXX maybe have a dedicated mapping for this to avoid the problem? */ mtx_lock(&Giant); pmap_zero_page(m); mtx_unlock(&Giant); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. 
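 *
 * This wrapper only takes the page queues lock and the pmap lock
 * and then hands the real work to pmap_enter_locked() below.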
*/ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_page_lock_queues(); PMAP_LOCK(pmap); pmap_enter_locked(pmap, va, m, prot, wired); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. * * The page queues and pmap must be locked. */ static void pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct pvo_head *pvo_head; uma_zone_t zone; vm_page_t pg; u_int pte_lo, pvo_flags, was_exec, i; int error; if (!pmap_initialized) { pvo_head = &pmap_pvo_kunmanaged; zone = pmap_upvo_zone; pvo_flags = 0; pg = NULL; was_exec = PTE_EXEC; } else { pvo_head = vm_page_to_pvoh(m); pg = m; zone = pmap_mpvo_zone; pvo_flags = PVO_MANAGED; was_exec = 0; } if (pmap_bootstrapped) mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* XXX change the pvo head for fake pages */ if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) pvo_head = &pmap_pvo_kunmanaged; /* * If this is a managed page, and it's the first reference to the page, * clear the execness of the page. Otherwise fetch the execness. */ if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) { if (LIST_EMPTY(pvo_head)) { pmap_attr_clear(pg, PTE_EXEC); } else { was_exec = pmap_attr_fetch(pg) & PTE_EXEC; } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. */ pte_lo = PTE_I | PTE_G; for (i = 0; i < pregions_sz; i++) { if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && (VM_PAGE_TO_PHYS(m) < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(PTE_I | PTE_G); break; } } if (prot & VM_PROT_WRITE) pte_lo |= PTE_BW; else pte_lo |= PTE_BR; if (prot & VM_PROT_EXECUTE) pvo_flags |= PVO_EXECUTABLE; if (wired) pvo_flags |= PVO_WIRED; if ((m->flags & PG_FICTITIOUS) != 0) pvo_flags |= PVO_FAKE; error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); /* * Flush the real page from the instruction cache if this page is * mapped executable and cacheable and was not previously mapped (or * was not mapped executable). */ if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0 && was_exec == 0) { /* * Flush the real memory from the cache. */ pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); if (pg != NULL) pmap_attr_save(pg, PTE_EXEC); } /* XXX syncicache always until problems are sorted */ pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. 
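 *
 * Concretely, each resident page m in the run is entered at
 *
 *	va = start + ptoa(m->pindex - m_start->pindex)
 *
 * and the walk stops once that offset reaches atop(end - start).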
*/ void pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m; vm_pindex_t diff, psize; psize = atop(end - start); m = m_start; PMAP_LOCK(pm); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { pmap_enter_locked(pm, start + ptoa(diff), m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); m = TAILQ_NEXT(m, listq); } PMAP_UNLOCK(pm); } -vm_page_t -pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) { PMAP_LOCK(pm); pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); PMAP_UNLOCK(pm); - return (NULL); } vm_paddr_t pmap_extract(pmap_t pm, vm_offset_t va) { struct pvo_entry *pvo; vm_paddr_t pa; PMAP_LOCK(pm); pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); if (pvo == NULL) pa = 0; else pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); PMAP_UNLOCK(pm); return (pa); } /* * Atomically extract and hold the physical page with the given * pmap and virtual address pair if that mapping permits the given * protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { struct pvo_entry *pvo; vm_page_t m; m = NULL; mtx_lock(&Giant); vm_page_lock_queues(); PMAP_LOCK(pmap); pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) && ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW || (prot & VM_PROT_WRITE) == 0)) { m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); vm_page_hold(m); } vm_page_unlock_queues(); PMAP_UNLOCK(pmap); mtx_unlock(&Giant); return (m); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { } void pmap_init(void) { CTR0(KTR_PMAP, "pmap_init"); pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); pmap_initialized = TRUE; } void pmap_init2(void) { CTR0(KTR_PMAP, "pmap_init2"); } boolean_t pmap_is_modified(vm_page_t m) { if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0) return (FALSE); return (pmap_query_bit(m, PTE_CHG)); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { return (FALSE); } void pmap_clear_reference(vm_page_t m) { if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; pmap_clear_bit(m, PTE_REF, NULL); } void pmap_clear_modify(vm_page_t m) { if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; pmap_clear_bit(m, PTE_CHG, NULL); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. 
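 *
 * Here the count comes straight from pmap_clear_bit(m, PTE_REF, NULL); no
 * attempt is made to cap how many mappings are examined.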
*/ int pmap_ts_referenced(vm_page_t m) { int count; if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (0); count = pmap_clear_bit(m, PTE_REF, NULL); return (count); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { u_int pte_lo; int error; int i; #if 0 if (va < VM_MIN_KERNEL_ADDRESS) panic("pmap_kenter: attempt to enter non-kernel address %#x", va); #endif pte_lo = PTE_I | PTE_G; for (i = 0; i < pregions_sz; i++) { if ((pa >= pregions[i].mr_start) && (pa < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(PTE_I | PTE_G); break; } } PMAP_LOCK(kernel_pmap); error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); if (error != 0 && error != ENOENT) panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, pa, error); /* * Flush the real memory from the instruction cache. */ if ((pte_lo & (PTE_I | PTE_G)) == 0) { pmap_syncicache(pa, PAGE_SIZE); } PMAP_UNLOCK(kernel_pmap); } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct pvo_entry *pvo; vm_paddr_t pa; #ifdef UMA_MD_SMALL_ALLOC /* * Allow direct mappings */ if (va < VM_MIN_KERNEL_ADDRESS) { return (va); } #endif PMAP_LOCK(kernel_pmap); pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); KASSERT(pvo != NULL, ("pmap_kextract: no addr found")); pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); PMAP_UNLOCK(kernel_pmap); return (pa); } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { pmap_remove(kernel_pmap, va, va + PAGE_SIZE); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { vm_offset_t sva, va; sva = *virt; va = sva; for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) pmap_kenter(va, pa_start); *virt = va; return (sva); } int pmap_mincore(pmap_t pmap, vm_offset_t addr) { TODO; return (0); } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_object_init_pt: non current pmap")); } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { struct pvo_head *pvo_head; struct pvo_entry *pvo, *next_pvo; struct pte *pt; pmap_t pmap; /* * Since the routine only downgrades protection, if the * maximal protection is desired, there isn't any change * to be made. */ if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE)) return; pvo_head = vm_page_to_pvoh(m); for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { next_pvo = LIST_NEXT(pvo, pvo_vlink); PMAP_PVO_CHECK(pvo); /* sanity check */ pmap = pvo->pvo_pmap; PMAP_LOCK(pmap); /* * Downgrading to no mapping at all, we just remove the entry. 
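		 * (I.e. the requested protection no longer includes
		 * VM_PROT_READ, so nothing useful can remain in the table.)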
*/ if ((prot & VM_PROT_READ) == 0) { pmap_pvo_remove(pvo, -1); PMAP_UNLOCK(pmap); continue; } /* * If EXEC permission is being revoked, just clear the flag * in the PVO. */ if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * If this entry is already RO, don't diddle with the page * table. */ if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { PMAP_UNLOCK(pmap); PMAP_PVO_CHECK(pvo); continue; } /* * Grab the PTE before we diddle the bits so pvo_to_pte can * verify the pte contents are as expected. */ pt = pmap_pvo_to_pte(pvo, -1); pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PMAP_UNLOCK(pmap); PMAP_PVO_CHECK(pvo); /* sanity check */ } /* * Downgrading from writeable: clear the VM page flag */ if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) vm_page_flag_clear(m, PG_WRITEABLE); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { int loops; struct pvo_entry *pvo; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; loops = 0; LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { if (pvo->pvo_pmap == pmap) return (TRUE); if (++loops >= 16) break; } return (FALSE); } static u_int pmap_vsidcontext; void pmap_pinit(pmap_t pmap) { int i, mask; u_int entropy; KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap")); PMAP_LOCK_INIT(pmap); entropy = 0; __asm __volatile("mftb %0" : "=r"(entropy)); /* * Allocate some segment registers for this pmap. */ for (i = 0; i < NPMAPS; i += VSID_NBPW) { u_int hash, n; /* * Create a new value by mutiplying by a prime and adding in * entropy from the timebase register. This is to make the * VSID more random so that the PT hash function collides * less often. (Note that the prime casues gcc to do shifts * instead of a multiply.) */ pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; hash = pmap_vsidcontext & (NPMAPS - 1); if (hash == 0) /* 0 is special, avoid it */ continue; n = hash >> 5; mask = 1 << (hash & (VSID_NBPW - 1)); hash = (pmap_vsidcontext & 0xfffff); if (pmap_vsid_bitmap[n] & mask) { /* collision? */ /* anything free in this bucket? */ if (pmap_vsid_bitmap[n] == 0xffffffff) { entropy = (pmap_vsidcontext >> 20); continue; } i = ffs(~pmap_vsid_bitmap[i]) - 1; mask = 1 << i; hash &= 0xfffff & ~(VSID_NBPW - 1); hash |= i; } pmap_vsid_bitmap[n] |= mask; for (i = 0; i < 16; i++) pmap->pm_sr[i] = VSID_MAKE(i, hash); return; } panic("pmap_pinit: out of segments"); } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { pmap_pinit(pm); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Set the physical protection on the specified range of this map as requested. 
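 *
 * This routine only ever reduces permissions: a range losing read access is
 * simply removed, and every remaining mapping in the range is downgraded to
 * read-only in the page table.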
*/ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct pvo_entry *pvo; struct pte *pt; int pteidx; CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { mtx_lock(&Giant); pmap_remove(pm, sva, eva); mtx_unlock(&Giant); return; } mtx_lock(&Giant); vm_page_lock_queues(); PMAP_LOCK(pm); for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo == NULL) continue; if ((prot & VM_PROT_EXECUTE) == 0) pvo->pvo_vaddr &= ~PVO_EXECUTABLE; /* * Grab the PTE pointer before we diddle with the cached PTE * copy. */ pt = pmap_pvo_to_pte(pvo, pteidx); /* * Change the protection of the page. */ pvo->pvo_pte.pte_lo &= ~PTE_PP; pvo->pvo_pte.pte_lo |= PTE_BR; /* * If the PVO is in the page table, update that pte as well. */ if (pt != NULL) pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); } vm_page_unlock_queues(); PMAP_UNLOCK(pm); mtx_unlock(&Giant); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) { vm_offset_t va; va = sva; while (count-- > 0) { pmap_kenter(va, VM_PAGE_TO_PHYS(*m)); va += PAGE_SIZE; m++; } } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } } void pmap_release(pmap_t pmap) { int idx, mask; /* * Free segment register's VSID */ if (pmap->pm_sr[0] == 0) panic("pmap_release"); idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); mask = 1 << (idx % VSID_NBPW); idx /= VSID_NBPW; pmap_vsid_bitmap[idx] &= ~mask; PMAP_LOCK_DESTROY(pmap); } /* * Remove the given range of addresses from the specified map. */ void pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry *pvo; int pteidx; vm_page_lock_queues(); PMAP_LOCK(pm); for (; sva < eva; sva += PAGE_SIZE) { pvo = pmap_pvo_find_va(pm, sva, &pteidx); if (pvo != NULL) { pmap_pvo_remove(pvo, pteidx); } } PMAP_UNLOCK(pm); vm_page_unlock_queues(); } /* * Remove physical page from all pmaps in which it resides. pmap_pvo_remove() * will reflect changes in pte's back to the vm_page. */ void pmap_remove_all(vm_page_t m) { struct pvo_head *pvo_head; struct pvo_entry *pvo, *next_pvo; pmap_t pmap; mtx_assert(&vm_page_queue_mtx, MA_OWNED); pvo_head = vm_page_to_pvoh(m); for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { next_pvo = LIST_NEXT(pvo, pvo_vlink); PMAP_PVO_CHECK(pvo); /* sanity check */ pmap = pvo->pvo_pmap; PMAP_LOCK(pmap); pmap_pvo_remove(pvo, -1); PMAP_UNLOCK(pmap); } vm_page_flag_clear(m, PG_WRITEABLE); } /* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove in the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. 
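 *
 * The memory is carved directly out of a phys_avail[] range; when an aligned
 * allocation lands in the middle of a range, that range is split in two.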
*/ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size, u_int align) { vm_offset_t s, e; int i, j; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (align != 0) s = (phys_avail[i] + align - 1) & ~(align - 1); else s = phys_avail[i]; e = s + size; if (s < phys_avail[i] || e > phys_avail[i + 1]) continue; if (s == phys_avail[i]) { phys_avail[i] += size; } else if (e == phys_avail[i + 1]) { phys_avail[i + 1] -= size; } else { for (j = phys_avail_count * 2; j > i; j -= 2) { phys_avail[j] = phys_avail[j - 2]; phys_avail[j + 1] = phys_avail[j - 1]; } phys_avail[i + 3] = phys_avail[i + 1]; phys_avail[i + 1] = s; phys_avail[i + 2] = e; phys_avail_count++; } return (s); } panic("pmap_bootstrap_alloc: could not allocate memory"); } /* * Return an unmapped pvo for a kernel virtual address. * Used by pmap functions that operate on physical pages. */ static struct pvo_entry * pmap_rkva_alloc(void) { struct pvo_entry *pvo; struct pte *pt; vm_offset_t kva; int pteidx; if (pmap_rkva_count == 0) panic("pmap_rkva_alloc: no more reserved KVAs"); kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); pmap_kenter(kva, 0); pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); if (pvo == NULL) panic("pmap_kva_alloc: pmap_pvo_find_va failed"); pt = pmap_pvo_to_pte(pvo, pteidx); if (pt == NULL) panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; return (pvo); } static void pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, int *depth_p) { struct pte *pt; /* * If this pvo already has a valid pte, we need to save it so it can * be restored later. We then just reload the new PTE over the old * slot. */ if (saved_pt != NULL) { pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } *saved_pt = pvo->pvo_pte; pvo->pvo_pte.pte_lo &= ~PTE_RPGN; } pvo->pvo_pte.pte_lo |= pa; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_map: could not spill pvo %p", pvo); if (depth_p != NULL) (*depth_p)++; } static void pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) { struct pte *pt; pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); pmap_pte_overflow++; } pvo->pvo_pte.pte_lo &= ~PTE_RPGN; /* * If there is a saved PTE and it's valid, restore it and return. */ if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { if (depth_p != NULL && --(*depth_p) == 0) panic("pmap_pa_unmap: restoring but depth == 0"); pvo->pvo_pte = *saved_pt; if (!pmap_pte_spill(pvo->pvo_vaddr)) panic("pmap_pa_unmap: could not spill pvo %p", pvo); } } static void pmap_syncicache(vm_offset_t pa, vm_size_t len) { __syncicache((void *)pa, len); } static void tlbia(void) { caddr_t i; SYNC(); for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { TLBIE(i); EIEIO(); } TLBSYNC(); SYNC(); } static int pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) { struct pvo_entry *pvo; u_int sr; int first; u_int ptegidx; int i; int bootstrap; pmap_pvo_enter_calls++; first = 0; bootstrap = 0; /* * Compute the PTE Group index. */ va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); /* * Remove any existing mapping for this page. Reuse the pvo entry if * there is a mapping. 
*/ mtx_lock(&pmap_table_mutex); LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && (pvo->pvo_pte.pte_lo & PTE_PP) == (pte_lo & PTE_PP)) { mtx_unlock(&pmap_table_mutex); return (0); } pmap_pvo_remove(pvo, -1); break; } } /* * If we aren't overwriting a mapping, try to allocate. */ if (pmap_initialized) { pvo = uma_zalloc(zone, M_NOWAIT); } else { if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", pmap_bpvo_pool_index, BPVO_POOL_SIZE, BPVO_POOL_SIZE * sizeof(struct pvo_entry)); } pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; pmap_bpvo_pool_index++; bootstrap = 1; } if (pvo == NULL) { mtx_unlock(&pmap_table_mutex); return (ENOMEM); } pmap_pvo_entries++; pvo->pvo_vaddr = va; pvo->pvo_pmap = pm; LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); pvo->pvo_vaddr &= ~ADDR_POFF; if (flags & VM_PROT_EXECUTE) pvo->pvo_vaddr |= PVO_EXECUTABLE; if (flags & PVO_WIRED) pvo->pvo_vaddr |= PVO_WIRED; if (pvo_head != &pmap_pvo_kunmanaged) pvo->pvo_vaddr |= PVO_MANAGED; if (bootstrap) pvo->pvo_vaddr |= PVO_BOOTSTRAP; if (flags & PVO_FAKE) pvo->pvo_vaddr |= PVO_FAKE; pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); /* * Remember if the list was empty and therefore will be the first * item. */ if (LIST_FIRST(pvo_head) == NULL) first = 1; LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); if (pvo->pvo_pte.pte_lo & PVO_WIRED) pm->pm_stats.wired_count++; pm->pm_stats.resident_count++; /* * We hope this succeeds but it isn't required. */ i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (i >= 0) { PVO_PTEGIDX_SET(pvo, i); } else { panic("pmap_pvo_enter: overflow"); pmap_pte_overflow++; } mtx_unlock(&pmap_table_mutex); return (first ? ENOENT : 0); } static void pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If there is an active pte entry, we need to deactivate it (and * save the ref & cfg bits). */ pt = pmap_pvo_to_pte(pvo, pteidx); if (pt != NULL) { pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); PVO_PTEGIDX_CLR(pvo); } else { pmap_pte_overflow--; } /* * Update our statistics. */ pvo->pvo_pmap->pm_stats.resident_count--; if (pvo->pvo_pte.pte_lo & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count--; /* * Save the REF/CHG bits into their cache if the page is managed. */ if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { struct vm_page *pg; pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); if (pg != NULL) { pmap_attr_save(pg, pvo->pvo_pte.pte_lo & (PTE_REF | PTE_CHG)); } } /* * Remove this PVO from the PV list. */ LIST_REMOVE(pvo, pvo_vlink); /* * Remove this from the overflow list and return it to the pool * if we aren't going to reuse it. */ LIST_REMOVE(pvo, pvo_olink); if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone, pvo); pmap_pvo_entries--; pmap_pvo_remove_calls++; } static __inline int pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) { int pteidx; /* * We can find the actual pte entry without searching by grabbing * the PTEG index from 3 unused bits in pte_lo[11:9] and by * noticing the HID bit. 
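	 *
	 * Roughly:  pteidx = ptegidx * 8 + slot, and if the PTE was entered
	 * via the secondary hash (PTE_HID set) the index is XORed with
	 * pmap_pteg_mask * 8.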
*/ pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); if (pvo->pvo_pte.pte_hi & PTE_HID) pteidx ^= pmap_pteg_mask * 8; return (pteidx); } static struct pvo_entry * pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) { struct pvo_entry *pvo; int ptegidx; u_int sr; va &= ~ADDR_POFF; sr = va_to_sr(pm->pm_sr, va); ptegidx = va_to_pteg(sr, va); mtx_lock(&pmap_table_mutex); LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { if (pteidx_p) *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); break; } } mtx_unlock(&pmap_table_mutex); return (pvo); } static struct pte * pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) { struct pte *pt; /* * If we haven't been supplied the ptegidx, calculate it. */ if (pteidx == -1) { int ptegidx; u_int sr; sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); pteidx = pmap_pvo_pte_index(pvo, ptegidx); } pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " "valid pte index", pvo); } if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " "pvo but no valid pte", pvo); } if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { panic("pmap_pvo_to_pte: pvo %p has valid pte in " "pmap_pteg_table %p but invalid in pvo", pvo, pt); } if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { panic("pmap_pvo_to_pte: pvo %p pte does not match " "pte %p in pmap_pteg_table", pvo, pt); } return (pt); } if (pvo->pvo_pte.pte_hi & PTE_VALID) { panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " "pmap_pteg_table but valid in pvo", pvo, pt); } return (NULL); } /* * XXX: THIS STUFF SHOULD BE IN pte.c? */ int pmap_pte_spill(vm_offset_t addr) { struct pvo_entry *source_pvo, *victim_pvo; struct pvo_entry *pvo; int ptegidx, i, j; u_int sr; struct pteg *pteg; struct pte *pt; pmap_pte_spills++; sr = mfsrin(addr); ptegidx = va_to_pteg(sr, addr); /* * Have to substitute some entry. Use the primary hash for this. * Use low bits of timebase as random generator. */ pteg = &pmap_pteg_table[ptegidx]; mtx_lock(&pmap_table_mutex); __asm __volatile("mftb %0" : "=r"(i)); i &= 7; pt = &pteg->pt[i]; source_pvo = NULL; victim_pvo = NULL; LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { /* * We need to find a pvo entry for this address. */ PMAP_PVO_CHECK(pvo); if (source_pvo == NULL && pmap_pte_match(&pvo->pvo_pte, sr, addr, pvo->pvo_pte.pte_hi & PTE_HID)) { /* * Now found an entry to be spilled into the pteg. * The PTE is now valid, so we know it's active. */ j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); if (j >= 0) { PVO_PTEGIDX_SET(pvo, j); pmap_pte_overflow--; PMAP_PVO_CHECK(pvo); mtx_unlock(&pmap_table_mutex); return (1); } source_pvo = pvo; if (victim_pvo != NULL) break; } /* * We also need the pvo entry of the victim we are replacing * so save the R & C bits of the PTE. */ if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; if (source_pvo != NULL) break; } } if (source_pvo == NULL) { mtx_unlock(&pmap_table_mutex); return (0); } if (victim_pvo == NULL) { if ((pt->pte_hi & PTE_HID) == 0) panic("pmap_pte_spill: victim p-pte (%p) has no pvo" "entry", pt); /* * If this is a secondary PTE, we need to search it's primary * pvo bucket for the matching PVO. 
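		 * (The primary bucket is recovered by XORing the PTEG index
		 * with pmap_pteg_mask.)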
*/ LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], pvo_olink) { PMAP_PVO_CHECK(pvo); /* * We also need the pvo entry of the victim we are * replacing so save the R & C bits of the PTE. */ if (pmap_pte_compare(pt, &pvo->pvo_pte)) { victim_pvo = pvo; break; } } if (victim_pvo == NULL) panic("pmap_pte_spill: victim s-pte (%p) has no pvo" "entry", pt); } /* * We are invalidating the TLB entry for the EA we are replacing even * though it's valid. If we don't, we lose any ref/chg bit changes * contained in the TLB entry. */ source_pvo->pvo_pte.pte_hi &= ~PTE_HID; pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); pmap_pte_set(pt, &source_pvo->pvo_pte); PVO_PTEGIDX_CLR(victim_pvo); PVO_PTEGIDX_SET(source_pvo, i); pmap_pte_replacements++; PMAP_PVO_CHECK(victim_pvo); PMAP_PVO_CHECK(source_pvo); mtx_unlock(&pmap_table_mutex); return (1); } static int pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) { struct pte *pt; int i; /* * First try primary hash. */ for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi &= ~PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } /* * Now try secondary hash. */ ptegidx ^= pmap_pteg_mask; ptegidx++; for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { if ((pt->pte_hi & PTE_VALID) == 0) { pvo_pt->pte_hi |= PTE_HID; pmap_pte_set(pt, pvo_pt); return (i); } } panic("pmap_pte_insert: overflow"); return (-1); } static boolean_t pmap_query_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; struct pte *pt; #if 0 if (pmap_attr_fetch(m) & ptebit) return (TRUE); #endif LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if we saved the bit off. If so, cache it and return * success. */ if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } /* * No luck, now go through the hard part of looking at the PTEs * themselves. Sync so that any pending REF/CHG bits are flushed to * the PTEs. */ SYNC(); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ /* * See if this pvo has a valid PTE. if so, fetch the * REF/CHG bits from the valid PTE. If the appropriate * ptebit is set, cache it and return success. */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) { pmap_attr_save(m, ptebit); PMAP_PVO_CHECK(pvo); /* sanity check */ return (TRUE); } } } return (FALSE); } static u_int pmap_clear_bit(vm_page_t m, int ptebit, int *origbit) { u_int count; struct pvo_entry *pvo; struct pte *pt; int rv; /* * Clear the cached value. */ rv = pmap_attr_fetch(m); pmap_attr_clear(m, ptebit); /* * Sync so that any pending REF/CHG bits are flushed to the PTEs (so * we can reset the right ones). note that since the pvo entries and * list heads are accessed via BAT0 and are never placed in the page * table, we don't have to worry about further accesses setting the * REF/CHG bits. */ SYNC(); /* * For each pvo entry, clear the pvo's ptebit. If this pvo has a * valid pte clear the ptebit from the valid pte. 
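	 *
	 * The return value counts only mappings whose hardware PTE actually
	 * had the bit set; the accumulated original bits are handed back
	 * through *origbit when the caller asks for them.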
*/ count = 0; LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { PMAP_PVO_CHECK(pvo); /* sanity check */ pt = pmap_pvo_to_pte(pvo, -1); if (pt != NULL) { pmap_pte_synch(pt, &pvo->pvo_pte); if (pvo->pvo_pte.pte_lo & ptebit) { count++; pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); } } rv |= pvo->pvo_pte.pte_lo; pvo->pvo_pte.pte_lo &= ~ptebit; PMAP_PVO_CHECK(pvo); /* sanity check */ } if (origbit != NULL) { *origbit = rv; } return (count); } /* * Return true if the physical range is encompassed by the battable[idx] */ static int pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) { u_int prot; u_int32_t start; u_int32_t end; u_int32_t bat_ble; /* * Return immediately if not a valid mapping */ if (!battable[idx].batu & BAT_Vs) return (EINVAL); /* * The BAT entry must be cache-inhibited, guarded, and r/w * so it can function as an i/o page */ prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); if (prot != (BAT_I|BAT_G|BAT_PP_RW)) return (EPERM); /* * The address should be within the BAT range. Assume that the * start address in the BAT has the correct alignment (thus * not requiring masking) */ start = battable[idx].batl & BAT_PBS; bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; end = start | (bat_ble << 15) | 0x7fff; if ((pa < start) || ((pa + size) > end)) return (ERANGE); return (0); } int pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size) { int i; /* * This currently does not work for entries that * overlap 256M BAT segments. */ for(i = 0; i < 16; i++) if (pmap_bat_mapped(i, pa, size) == 0) return (0); return (EFAULT); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, tmpva, ppa, offset; int i; ppa = trunc_page(pa); offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); GIANT_REQUIRED; /* * If the physical address lies within a valid BAT table entry, * return the 1:1 mapping. This currently doesn't work * for regions that overlap 256M BAT segments. */ for (i = 0; i < 16; i++) { if (pmap_bat_mapped(i, pa, size) == 0) return ((void *) pa); } va = kmem_alloc_nofault(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { pmap_kenter(tmpva, ppa); TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ size -= PAGE_SIZE; tmpva += PAGE_SIZE; ppa += PAGE_SIZE; } return ((void *)(va + offset)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { vm_offset_t base, offset; /* * If this is outside kernel virtual space, then it's a * battable entry and doesn't require unmapping */ if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); kmem_free(kernel_map, base, size); } } Index: stable/6/sys/sparc64/sparc64/pmap.c =================================================================== --- stable/6/sys/sparc64/sparc64/pmap.c (revision 173366) +++ stable/6/sys/sparc64/sparc64/pmap.c (revision 173367) @@ -1,1980 +1,1978 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. 
* * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD$ */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include "opt_kstack_pages.h" #include "opt_msgbuf.h" #include "opt_pmap.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_paddr_t msgbuf_phys; int pmap_pagedaemon_waken; /* * Map of physical memory reagions. 
*/ vm_paddr_t phys_avail[128]; static struct ofw_mem_region mra[128]; struct ofw_mem_region sparc64_memreg[128]; int sparc64_nmemreg; static struct ofw_map translations[128]; static int translations_size; static vm_offset_t pmap_idle_map; static vm_offset_t pmap_temp_map_1; static vm_offset_t pmap_temp_map_2; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; vm_offset_t vm_max_kernel_address; /* * Kernel pmap. */ struct pmap kernel_pmap_store; /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size); /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. * * The page queues and pmap must be locked. */ static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired); extern int tl1_immu_miss_patch_1[]; extern int tl1_immu_miss_patch_2[]; extern int tl1_dmmu_miss_patch_1[]; extern int tl1_dmmu_miss_patch_2[]; extern int tl1_dmmu_prot_patch_1[]; extern int tl1_dmmu_prot_patch_2[]; /* * If user pmap is processed with pmap_remove and with pmap_remove and the * resident count drops to 0, there are no more pages to remove, so we * need not continue. */ #define PMAP_REMOVE_DONE(pm) \ ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0) /* * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove() * and pmap_protect() instead of trying each virtual address. */ #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE) SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, ""); PMAP_STATS_VAR(pmap_nenter); PMAP_STATS_VAR(pmap_nenter_update); PMAP_STATS_VAR(pmap_nenter_replace); PMAP_STATS_VAR(pmap_nenter_new); PMAP_STATS_VAR(pmap_nkenter); PMAP_STATS_VAR(pmap_nkenter_oc); PMAP_STATS_VAR(pmap_nkenter_stupid); PMAP_STATS_VAR(pmap_nkremove); PMAP_STATS_VAR(pmap_nqenter); PMAP_STATS_VAR(pmap_nqremove); PMAP_STATS_VAR(pmap_ncache_enter); PMAP_STATS_VAR(pmap_ncache_enter_c); PMAP_STATS_VAR(pmap_ncache_enter_oc); PMAP_STATS_VAR(pmap_ncache_enter_cc); PMAP_STATS_VAR(pmap_ncache_enter_coc); PMAP_STATS_VAR(pmap_ncache_enter_nc); PMAP_STATS_VAR(pmap_ncache_enter_cnc); PMAP_STATS_VAR(pmap_ncache_remove); PMAP_STATS_VAR(pmap_ncache_remove_c); PMAP_STATS_VAR(pmap_ncache_remove_oc); PMAP_STATS_VAR(pmap_ncache_remove_cc); PMAP_STATS_VAR(pmap_ncache_remove_coc); PMAP_STATS_VAR(pmap_ncache_remove_nc); PMAP_STATS_VAR(pmap_nzero_page); PMAP_STATS_VAR(pmap_nzero_page_c); PMAP_STATS_VAR(pmap_nzero_page_oc); PMAP_STATS_VAR(pmap_nzero_page_nc); PMAP_STATS_VAR(pmap_nzero_page_area); PMAP_STATS_VAR(pmap_nzero_page_area_c); PMAP_STATS_VAR(pmap_nzero_page_area_oc); PMAP_STATS_VAR(pmap_nzero_page_area_nc); PMAP_STATS_VAR(pmap_nzero_page_idle); PMAP_STATS_VAR(pmap_nzero_page_idle_c); PMAP_STATS_VAR(pmap_nzero_page_idle_oc); PMAP_STATS_VAR(pmap_nzero_page_idle_nc); PMAP_STATS_VAR(pmap_ncopy_page); PMAP_STATS_VAR(pmap_ncopy_page_c); PMAP_STATS_VAR(pmap_ncopy_page_oc); PMAP_STATS_VAR(pmap_ncopy_page_nc); PMAP_STATS_VAR(pmap_ncopy_page_dc); PMAP_STATS_VAR(pmap_ncopy_page_doc); PMAP_STATS_VAR(pmap_ncopy_page_sc); PMAP_STATS_VAR(pmap_ncopy_page_soc); PMAP_STATS_VAR(pmap_nnew_thread); PMAP_STATS_VAR(pmap_nnew_thread_oc); /* * Quick sort callout for comparing memory regions. 
*/ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct ofw_mem_region *mra; const struct ofw_mem_region *mrb; mra = a; mrb = b; if (mra->mr_start < mrb->mr_start) return (-1); else if (mra->mr_start > mrb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *oma; const struct ofw_map *omb; oma = a; omb = b; if (oma->om_start < omb->om_start) return (-1); else if (oma->om_start > omb->om_start) return (1); else return (0); } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_offset_t ekva) { struct pmap *pm; struct tte *tp; vm_offset_t off; vm_offset_t va; vm_paddr_t pa; vm_size_t physsz; vm_size_t virtsz; ihandle_t pmem; ihandle_t vmem; int sz; int i; int j; /* * Find out what physical memory is available from the prom and * initialize the phys_avail array. This must be done before * pmap_bootstrap_alloc is called. */ if ((pmem = OF_finddevice("/memory")) == -1) panic("pmap_bootstrap: finddevice /memory"); if ((sz = OF_getproplen(pmem, "available")) == -1) panic("pmap_bootstrap: getproplen /memory/available"); if (sizeof(phys_avail) < sz) panic("pmap_bootstrap: phys_avail too small"); if (sizeof(mra) < sz) panic("pmap_bootstrap: mra too small"); bzero(mra, sz); if (OF_getprop(pmem, "available", mra, sz) == -1) panic("pmap_bootstrap: getprop /memory/available"); sz /= sizeof(*mra); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(mra, sz, sizeof (*mra), mr_cmp); physsz = 0; getenv_quad("hw.physmem", &physmem); physmem = btoc(physmem); for (i = 0, j = 0; i < sz; i++, j += 2) { CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start, mra[i].mr_size); if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) { if (btoc(physsz) < physmem) { phys_avail[j] = mra[i].mr_start; phys_avail[j + 1] = mra[i].mr_start + (ctob(physmem) - physsz); physsz = ctob(physmem); } break; } phys_avail[j] = mra[i].mr_start; phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size; physsz += mra[i].mr_size; } physmem = btoc(physsz); /* * Calculate the size of kernel virtual memory, and the size and mask * for the kernel tsb. */ virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT)); vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz; tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT); tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1; /* * Allocate the kernel tsb and lock it in the tlb. */ pa = pmap_bootstrap_alloc(tsb_kernel_size); if (pa & PAGE_MASK_4M) panic("pmap_bootstrap: tsb unaligned\n"); tsb_kernel_phys = pa; tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size); pmap_map_tsb(); bzero(tsb_kernel, tsb_kernel_size); /* * Allocate and map the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE); msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys); /* * Patch the virtual address and the tsb mask into the trap table. 
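	 * The SETHI/OR immediates in the MMU miss and protection handlers are
	 * rewritten with the tsb_kernel_mask and tsb_kernel address computed
	 * above, and each patched instruction is flushed from the
	 * instruction cache.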
*/ #define SETHI(rd, imm22) \ (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \ EIF_IMM((imm22) >> 10, 22)) #define OR_R_I_R(rd, imm13, rs1) \ (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \ EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13)) #define PATCH(addr) do { \ if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \ addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \ addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \ panic("pmap_boostrap: patched instructions have changed"); \ addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \ addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \ addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \ flush(addr); \ flush(addr + 1); \ flush(addr + 2); \ } while (0) PATCH(tl1_immu_miss_patch_1); PATCH(tl1_immu_miss_patch_2); PATCH(tl1_dmmu_miss_patch_1); PATCH(tl1_dmmu_miss_patch_2); PATCH(tl1_dmmu_prot_patch_1); PATCH(tl1_dmmu_prot_patch_2); /* * Enter fake 8k pages for the 4MB kernel pages, so that * pmap_kextract() will work for them. */ for (i = 0; i < kernel_tlb_slots; i++) { pa = kernel_tlbs[i].te_pa; va = kernel_tlbs[i].te_va; for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) { tp = tsb_kvtotte(va + off); tp->tte_vpn = TV_VPN(va + off, TS_8K); tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; } } /* * Set the start and end of kva. The kernel is loaded at the first * available 4 meg super page, so round up to the end of the page. */ virtual_avail = roundup2(ekva, PAGE_SIZE_4M); virtual_end = vm_max_kernel_address; kernel_vm_end = vm_max_kernel_address; /* * Allocate kva space for temporary mappings. */ pmap_idle_map = virtual_avail; virtual_avail += PAGE_SIZE * DCACHE_COLORS; pmap_temp_map_1 = virtual_avail; virtual_avail += PAGE_SIZE * DCACHE_COLORS; pmap_temp_map_2 = virtual_avail; virtual_avail += PAGE_SIZE * DCACHE_COLORS; /* * Allocate a kernel stack with guard page for thread0 and map it into * the kernel tsb. We must ensure that the virtual address is coloured * properly, since we're allocating from phys_avail so the memory won't * have an associated vm_page_t. */ pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) * PAGE_SIZE); kstack0_phys = pa; virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) * PAGE_SIZE; kstack0 = virtual_avail; virtual_avail += roundup(KSTACK_PAGES, DCACHE_COLORS) * PAGE_SIZE; KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys), ("pmap_bootstrap: kstack0 miscoloured")); for (i = 0; i < KSTACK_PAGES; i++) { pa = kstack0_phys + i * PAGE_SIZE; va = kstack0 + i * PAGE_SIZE; tp = tsb_kvtotte(va); tp->tte_vpn = TV_VPN(va, TS_8K); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; } /* * Calculate the last available physical address. */ for (i = 0; phys_avail[i + 2] != 0; i += 2) ; Maxmem = sparc64_btop(phys_avail[i + 1]); /* * Add the prom mappings to the kernel tsb. 
*/ if ((vmem = OF_finddevice("/virtual-memory")) == -1) panic("pmap_bootstrap: finddevice /virtual-memory"); if ((sz = OF_getproplen(vmem, "translations")) == -1) panic("pmap_bootstrap: getproplen translations"); if (sizeof(translations) < sz) panic("pmap_bootstrap: translations too small"); bzero(translations, sz); if (OF_getprop(vmem, "translations", translations, sz) == -1) panic("pmap_bootstrap: getprop /virtual-memory/translations"); sz /= sizeof(*translations); translations_size = sz; CTR0(KTR_PMAP, "pmap_bootstrap: translations"); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR3(KTR_PMAP, "translation: start=%#lx size=%#lx tte=%#lx", translations[i].om_start, translations[i].om_size, translations[i].om_tte); if (translations[i].om_start < VM_MIN_PROM_ADDRESS || translations[i].om_start > VM_MAX_PROM_ADDRESS) continue; for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) { va = translations[i].om_start + off; tp = tsb_kvtotte(va); tp->tte_vpn = TV_VPN(va, TS_8K); tp->tte_data = ((translations[i].om_tte & ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) + off; } } /* * Get the available physical memory ranges from /memory/reg. These * are only used for kernel dumps, but it may not be wise to do prom * calls in that situation. */ if ((sz = OF_getproplen(pmem, "reg")) == -1) panic("pmap_bootstrap: getproplen /memory/reg"); if (sizeof(sparc64_memreg) < sz) panic("pmap_bootstrap: sparc64_memreg too small"); if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1) panic("pmap_bootstrap: getprop /memory/reg"); sparc64_nmemreg = sz / sizeof(*sparc64_memreg); /* * Initialize the kernel pmap (which is statically allocated). * NOTE: PMAP_LOCK_INIT() is needed as part of the initialization * but sparc64 start up is not ready to initialize mutexes yet. * It is called in machdep.c. */ pm = kernel_pmap; for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = TLB_CTX_KERNEL; pm->pm_active = ~0; /* XXX flush all non-locked tlb entries */ } void pmap_map_tsb(void) { vm_offset_t va; vm_paddr_t pa; u_long data; u_long s; int i; s = intr_disable(); /* * Map the 4mb tsb pages. */ for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) { va = (vm_offset_t)tsb_kernel + i; pa = tsb_kernel_phys + i; data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV | TD_P | TD_W; /* XXX - cheetah */ stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(TLB_CTX_KERNEL)); stxa_sync(0, ASI_DTLB_DATA_IN_REG, data); } /* * Set the secondary context to be the kernel context (needed for * fp block operations in the kernel and the cache code). */ stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL); membar(Sync); intr_restore(s); } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. */ static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size) { vm_paddr_t pa; int i; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (phys_avail[i + 1] - phys_avail[i] < size) continue; pa = phys_avail[i]; phys_avail[i] += size; return (pa); } panic("pmap_bootstrap_alloc"); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.tte_list); m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m)); m->md.flags = 0; m->md.pmap = NULL; } /* * Initialize the pmap module. 
*/ void pmap_init(void) { vm_offset_t addr; vm_size_t size; int result; int i; for (i = 0; i < translations_size; i++) { addr = translations[i].om_start; size = translations[i].om_size; if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS) continue; result = vm_map_find(kernel_map, NULL, 0, &addr, size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0); if (result != KERN_SUCCESS || addr != translations[i].om_start) panic("pmap_init: vm_map_find"); } } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2(void) { } /* * Extract the physical page address associated with the given * map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pm, vm_offset_t va) { struct tte *tp; vm_paddr_t pa; if (pm == kernel_pmap) return (pmap_kextract(va)); PMAP_LOCK(pm); tp = tsb_tte_lookup(pm, va); if (tp == NULL) pa = 0; else pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)); PMAP_UNLOCK(pm); return (pa); } /* * Atomically extract and hold the physical page with the given * pmap and virtual address pair if that mapping permits the given * protection. */ vm_page_t pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot) { struct tte *tp; vm_page_t m; m = NULL; vm_page_lock_queues(); if (pm == kernel_pmap) { if (va >= VM_MIN_DIRECT_ADDRESS) { tp = NULL; m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va)); vm_page_hold(m); } else { tp = tsb_kvtotte(va); if ((tp->tte_data & TD_V) == 0) tp = NULL; } } else { PMAP_LOCK(pm); tp = tsb_tte_lookup(pm, va); } if (tp != NULL && ((tp->tte_data & TD_SW) || (prot & VM_PROT_WRITE) == 0)) { m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); vm_page_hold(m); } vm_page_unlock_queues(); if (pm != kernel_pmap) PMAP_UNLOCK(pm); return (m); } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_paddr_t pmap_kextract(vm_offset_t va) { struct tte *tp; if (va >= VM_MIN_DIRECT_ADDRESS) return (TLB_DIRECT_TO_PHYS(va)); tp = tsb_kvtotte(va); if ((tp->tte_data & TD_V) == 0) return (0); return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp))); } int pmap_cache_enter(vm_page_t m, vm_offset_t va) { struct tte *tp; int color; mtx_assert(&vm_page_queue_mtx, MA_OWNED); KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_cache_enter: fake page")); PMAP_STATS_INC(pmap_ncache_enter); /* * Find the color for this virtual address and note the added mapping. */ color = DCACHE_COLOR(va); m->md.colors[color]++; /* * If all existing mappings have the same color, the mapping is * cacheable. */ if (m->md.color == color) { KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0, ("pmap_cache_enter: cacheable, mappings of other color")); if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) PMAP_STATS_INC(pmap_ncache_enter_c); else PMAP_STATS_INC(pmap_ncache_enter_oc); return (1); } /* * If there are no mappings of the other color, and the page still has * the wrong color, this must be a new mapping. Change the color to * match the new mapping, which is cacheable. We must flush the page * from the cache now. */ if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) { KASSERT(m->md.colors[color] == 1, ("pmap_cache_enter: changing color, not new mapping")); dcache_page_inval(VM_PAGE_TO_PHYS(m)); m->md.color = color; if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) PMAP_STATS_INC(pmap_ncache_enter_cc); else PMAP_STATS_INC(pmap_ncache_enter_coc); return (1); } /* * If the mapping is already non-cacheable, just return. 
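	 * (A color of -1 means every existing mapping has already been made
	 * uncacheable.)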
*/ if (m->md.color == -1) { PMAP_STATS_INC(pmap_ncache_enter_nc); return (0); } PMAP_STATS_INC(pmap_ncache_enter_cnc); /* * Mark all mappings as uncacheable, flush any lines with the other * color out of the dcache, and set the color to none (-1). */ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { atomic_clear_long(&tp->tte_data, TD_CV); tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } dcache_page_inval(VM_PAGE_TO_PHYS(m)); m->md.color = -1; return (0); } void pmap_cache_remove(vm_page_t m, vm_offset_t va) { struct tte *tp; int color; mtx_assert(&vm_page_queue_mtx, MA_OWNED); CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va, m->md.colors[DCACHE_COLOR(va)]); KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_cache_remove: fake page")); KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0, ("pmap_cache_remove: no mappings %d <= 0", m->md.colors[DCACHE_COLOR(va)])); PMAP_STATS_INC(pmap_ncache_remove); /* * Find the color for this virtual address and note the removal of * the mapping. */ color = DCACHE_COLOR(va); m->md.colors[color]--; /* * If the page is cacheable, just return and keep the same color, even * if there are no longer any mappings. */ if (m->md.color != -1) { if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) PMAP_STATS_INC(pmap_ncache_remove_c); else PMAP_STATS_INC(pmap_ncache_remove_oc); return; } KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0, ("pmap_cache_remove: uncacheable, no mappings of other color")); /* * If the page is not cacheable (color is -1), and the number of * mappings for this color is not zero, just return. There are * mappings of the other color still, so remain non-cacheable. */ if (m->md.colors[color] != 0) { PMAP_STATS_INC(pmap_ncache_remove_nc); return; } /* * The number of mappings for this color is now zero. Recache the * other colored mappings, and change the page color to the other * color. There should be no lines in the data cache for this page, * so flushing should not be needed. */ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { atomic_set_long(&tp->tte_data, TD_CV); tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } m->md.color = DCACHE_OTHER_COLOR(color); if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) PMAP_STATS_INC(pmap_ncache_remove_cc); else PMAP_STATS_INC(pmap_ncache_remove_coc); } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_page_t m) { vm_offset_t ova; struct tte *tp; vm_page_t om; u_long data; mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_STATS_INC(pmap_nkenter); tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx", va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data); if (m->pc != DCACHE_COLOR(va)) { CTR6(KTR_CT2, "pmap_kenter: off colour va=%#lx pa=%#lx o=%p oc=%#lx ot=%d pi=%#lx", va, VM_PAGE_TO_PHYS(m), m->object, m->object ? m->object->pg_color : -1, m->object ? m->object->type : -1, m->pindex); PMAP_STATS_INC(pmap_nkenter_oc); } if ((tp->tte_data & TD_V) != 0) { om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); ova = TTE_GET_VA(tp); if (m == om && va == ova) { PMAP_STATS_INC(pmap_nkenter_stupid); return; } TAILQ_REMOVE(&om->md.tte_list, tp, tte_link); pmap_cache_remove(om, ova); if (va != ova) tlb_page_demap(kernel_pmap, ova); } data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP | TD_P | TD_W; if (pmap_cache_enter(m, va) != 0) data |= TD_CV; tp->tte_vpn = TV_VPN(va, TS_8K); tp->tte_data = data; TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); } /* * Map a wired page into kernel virtual address space. 
This additionally * takes a flag argument wich is or'ed to the TTE data. This is used by * bus_space_map(). * NOTE: if the mapping is non-cacheable, it's the caller's responsibility * to flush entries that might still be in the cache, if applicable. */ void pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags) { struct tte *tp; tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx", va, pa, tp, tp->tte_data); tp->tte_vpn = TV_VPN(va, TS_8K); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags; } /* * Remove a wired page from kernel virtual address space. */ void pmap_kremove(vm_offset_t va) { struct tte *tp; vm_page_t m; mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_STATS_INC(pmap_nkremove); tp = tsb_kvtotte(va); CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, tp->tte_data); if ((tp->tte_data & TD_V) == 0) return; m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); pmap_cache_remove(m, va); TTE_ZERO(tp); } /* * Inverse of pmap_kenter_flags, used by bus_space_unmap(). */ void pmap_kremove_flags(vm_offset_t va) { struct tte *tp; tp = tsb_kvtotte(va); CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, tp->tte_data); TTE_ZERO(tp); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return (TLB_PHYS_TO_DIRECT(start)); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) { vm_offset_t va; int locked; PMAP_STATS_INC(pmap_nqenter); va = sva; if (!(locked = mtx_owned(&vm_page_queue_mtx))) vm_page_lock_queues(); while (count-- > 0) { pmap_kenter(va, *m); va += PAGE_SIZE; m++; } if (!locked) vm_page_unlock_queues(); tlb_range_demap(kernel_pmap, sva, va); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; int locked; PMAP_STATS_INC(pmap_nqremove); va = sva; if (!(locked = mtx_owned(&vm_page_queue_mtx))) vm_page_lock_queues(); while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } if (!locked) vm_page_unlock_queues(); tlb_range_demap(kernel_pmap, sva, va); } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { int i; PMAP_LOCK_INIT(pm); for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = 0; pm->pm_active = 0; pm->pm_tsb = NULL; pm->pm_tsb_obj = NULL; bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Initialize a preallocated and zeroed pmap structure, such as one in a * vmspace structure. */ void pmap_pinit(pmap_t pm) { vm_page_t ma[TSB_PAGES]; vm_page_t m; int i; PMAP_LOCK_INIT(pm); /* * Allocate kva space for the tsb. */ if (pm->pm_tsb == NULL) { pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map, TSB_BSIZE); } /* * Allocate an object for it. 
*/ if (pm->pm_tsb_obj == NULL) pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES); VM_OBJECT_LOCK(pm->pm_tsb_obj); for (i = 0; i < TSB_PAGES; i++) { m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY | VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO); vm_page_lock_queues(); m->valid = VM_PAGE_BITS_ALL; m->md.pmap = pm; vm_page_unlock_queues(); ma[i] = m; } VM_OBJECT_UNLOCK(pm->pm_tsb_obj); pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES); for (i = 0; i < MAXCPU; i++) pm->pm_context[i] = -1; pm->pm_active = 0; bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pm) { vm_object_t obj; vm_page_t m; struct pcpu *pc; CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb); KASSERT(pmap_resident_count(pm) == 0, ("pmap_release: resident pages %ld != 0", pmap_resident_count(pm))); /* * After the pmap was freed, it might be reallocated to a new process. * When switching, this might lead us to wrongly assume that we need * not switch contexts because old and new pmap pointer are equal. * Therefore, make sure that this pmap is not referenced by any PCPU * pointer any more. This could happen in two cases: * - A process that referenced the pmap is currently exiting on a CPU. * However, it is guaranteed to not switch in any more after setting * its state to PRS_ZOMBIE. * - A process that referenced this pmap ran on a CPU, but we switched * to a kernel thread, leaving the pmap pointer unchanged. */ mtx_lock_spin(&sched_lock); SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc->pc_pmap == pm) pc->pc_pmap = NULL; } mtx_unlock_spin(&sched_lock); obj = pm->pm_tsb_obj; VM_OBJECT_LOCK(obj); KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1")); while (!TAILQ_EMPTY(&obj->memq)) { m = TAILQ_FIRST(&obj->memq); vm_page_lock_queues(); if (vm_page_sleep_if_busy(m, FALSE, "pmaprl")) continue; KASSERT(m->hold_count == 0, ("pmap_release: freeing held tsb page")); m->md.pmap = NULL; m->wire_count--; atomic_subtract_int(&cnt.v_wire_count, 1); vm_page_free_zero(m); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(obj); pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES); PMAP_LOCK_DESTROY(pm); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { panic("pmap_growkernel: can't grow kernel"); } int pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va) { vm_page_t m; u_long data; mtx_assert(&vm_page_queue_mtx, MA_OWNED); data = atomic_readandclear_long(&tp->tte_data); if ((data & TD_FAKE) == 0) { m = PHYS_TO_VM_PAGE(TD_PA(data)); TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); if ((data & TD_WIRED) != 0) pm->pm_stats.wired_count--; if ((data & TD_PV) != 0) { if ((data & TD_W) != 0) vm_page_dirty(m); if ((data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); if (TAILQ_EMPTY(&m->md.tte_list)) vm_page_flag_clear(m, PG_WRITEABLE); pm->pm_stats.resident_count--; } pmap_cache_remove(m, va); } TTE_ZERO(tp); if (PMAP_REMOVE_DONE(pm)) return (0); return (1); } /* * Remove the given range of addresses from the specified map. 
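 *
 * Ranges larger than PMAP_TSB_THRESH are handled by walking the whole TSB
 * with tsb_foreach() and demapping the entire context; smaller ranges are
 * torn down one page at a time, followed by a ranged demap.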
*/ void pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) { struct tte *tp; vm_offset_t va; CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", pm->pm_context[PCPU_GET(cpuid)], start, end); if (PMAP_REMOVE_DONE(pm)) return; vm_page_lock_queues(); PMAP_LOCK(pm); if (end - start > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, start, end, pmap_remove_tte); tlb_context_demap(pm); } else { for (va = start; va < end; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) { if (!pmap_remove_tte(pm, NULL, tp, va)) break; } } tlb_range_demap(pm, start, end - 1); } PMAP_UNLOCK(pm); vm_page_unlock_queues(); } void pmap_remove_all(vm_page_t m) { struct pmap *pm; struct tte *tpn; struct tte *tp; vm_offset_t va; mtx_assert(&vm_page_queue_mtx, MA_OWNED); for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) { tpn = TAILQ_NEXT(tp, tte_link); if ((tp->tte_data & TD_PV) == 0) continue; pm = TTE_GET_PMAP(tp); va = TTE_GET_VA(tp); PMAP_LOCK(pm); if ((tp->tte_data & TD_WIRED) != 0) pm->pm_stats.wired_count--; if ((tp->tte_data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); if ((tp->tte_data & TD_W) != 0) vm_page_dirty(m); tp->tte_data &= ~TD_V; tlb_page_demap(pm, va); TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); pm->pm_stats.resident_count--; pmap_cache_remove(m, va); TTE_ZERO(tp); PMAP_UNLOCK(pm); } vm_page_flag_clear(m, PG_WRITEABLE); } int pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va) { u_long data; vm_page_t m; data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W); if ((data & TD_PV) != 0) { m = PHYS_TO_VM_PAGE(TD_PA(data)); if ((data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); if ((data & TD_W) != 0) vm_page_dirty(m); } return (1); } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va; struct tte *tp; CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } if (prot & VM_PROT_WRITE) return; vm_page_lock_queues(); PMAP_LOCK(pm); if (eva - sva > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte); tlb_context_demap(pm); } else { for (va = sva; va < eva; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) pmap_protect_tte(pm, NULL, tp, va); } tlb_range_demap(pm, sva, eva - 1); } PMAP_UNLOCK(pm); vm_page_unlock_queues(); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ void pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { vm_page_lock_queues(); PMAP_LOCK(pm); pmap_enter_locked(pm, va, m, prot, wired); vm_page_unlock_queues(); PMAP_UNLOCK(pm); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. * * The page queues and pmap must be locked. */ static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { struct tte *tp; vm_paddr_t pa; u_long data; int i; mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pm, MA_OWNED); PMAP_STATS_INC(pmap_nenter); pa = VM_PAGE_TO_PHYS(m); /* * If this is a fake page from the device_pager, but it covers actual * physical memory, convert to the real backing page. 
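	 * The check below is a simple scan of the phys_avail[] (start, end)
	 * pairs: if pa falls inside any chunk of real memory, the fake page
	 * is replaced by PHYS_TO_VM_PAGE(pa).  For instance (illustrative
	 * only), a device mapping of register or frame-buffer space outside
	 * phys_avail[] keeps its fictitious page, while a fictitious page
	 * that aliases ordinary RAM is swapped for the managed page that
	 * describes that RAM.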
*/ if ((m->flags & PG_FICTITIOUS) != 0) { for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) { m = PHYS_TO_VM_PAGE(pa); break; } } } CTR6(KTR_PMAP, "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d", pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired); /* * If there is an existing mapping, and the physical address has not * changed, must be protection or wiring change. */ if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) { CTR0(KTR_PMAP, "pmap_enter: update"); PMAP_STATS_INC(pmap_nenter_update); /* * Wiring change, just update stats. */ if (wired) { if ((tp->tte_data & TD_WIRED) == 0) { tp->tte_data |= TD_WIRED; pm->pm_stats.wired_count++; } } else { if ((tp->tte_data & TD_WIRED) != 0) { tp->tte_data &= ~TD_WIRED; pm->pm_stats.wired_count--; } } /* * Save the old bits and clear the ones we're interested in. */ data = tp->tte_data; tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W); /* * If we're turning off write permissions, sense modify status. */ if ((prot & VM_PROT_WRITE) != 0) { tp->tte_data |= TD_SW; if (wired) { tp->tte_data |= TD_W; } } else if ((data & TD_W) != 0) { vm_page_dirty(m); } /* * If we're turning on execute permissions, flush the icache. */ if ((prot & VM_PROT_EXECUTE) != 0) { if ((data & TD_EXEC) == 0) { icache_page_inval(pa); } tp->tte_data |= TD_EXEC; } /* * Delete the old mapping. */ tlb_page_demap(pm, TTE_GET_VA(tp)); } else { /* * If there is an existing mapping, but its for a different * phsyical address, delete the old mapping. */ if (tp != NULL) { CTR0(KTR_PMAP, "pmap_enter: replace"); PMAP_STATS_INC(pmap_nenter_replace); pmap_remove_tte(pm, NULL, tp, va); tlb_page_demap(pm, va); } else { CTR0(KTR_PMAP, "pmap_enter: new"); PMAP_STATS_INC(pmap_nenter_new); } /* * Now set up the data and install the new mapping. */ data = TD_V | TD_8K | TD_PA(pa); if (pm == kernel_pmap) data |= TD_P; if (prot & VM_PROT_WRITE) data |= TD_SW; if (prot & VM_PROT_EXECUTE) { data |= TD_EXEC; icache_page_inval(pa); } /* * If its wired update stats. We also don't need reference or * modify tracking for wired mappings, so set the bits now. */ if (wired) { pm->pm_stats.wired_count++; data |= TD_REF | TD_WIRED; if ((prot & VM_PROT_WRITE) != 0) data |= TD_W; } tsb_tte_enter(pm, m, va, TS_8K, data); } } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. 
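 *
 * As a small worked example (numbers are only illustrative): with
 * psize = atop(end - start), a resident page whose pindex is
 * m_start->pindex + 3 is entered at start + 3 * PAGE_SIZE, and the loop
 * stops once the offset reaches psize.  All mappings are entered
 * read/execute only and unwired.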
*/ void pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m; vm_pindex_t diff, psize; psize = atop(end - start); m = m_start; PMAP_LOCK(pm); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { pmap_enter_locked(pm, start + ptoa(diff), m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); m = TAILQ_NEXT(m, listq); } PMAP_UNLOCK(pm); } -vm_page_t -pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, - vm_page_t mpte) +void +pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) { PMAP_LOCK(pm); pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); PMAP_UNLOCK(pm); - return (NULL); } void pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE, ("pmap_object_init_pt: non-device object")); } /* * Change the wiring attribute for a map/virtual-address pair. * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) { struct tte *tp; u_long data; PMAP_LOCK(pm); if ((tp = tsb_tte_lookup(pm, va)) != NULL) { if (wired) { data = atomic_set_long(&tp->tte_data, TD_WIRED); if ((data & TD_WIRED) == 0) pm->pm_stats.wired_count++; } else { data = atomic_clear_long(&tp->tte_data, TD_WIRED); if ((data & TD_WIRED) != 0) pm->pm_stats.wired_count--; } } PMAP_UNLOCK(pm); } static int pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va) { vm_page_t m; u_long data; if ((tp->tte_data & TD_FAKE) != 0) return (1); if (tsb_tte_lookup(dst_pmap, va) == NULL) { data = tp->tte_data & ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W); m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); tsb_tte_enter(dst_pmap, m, va, TS_8K, data); } return (1); } void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { struct tte *tp; vm_offset_t va; if (dst_addr != src_addr) return; vm_page_lock_queues(); if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } if (len > PMAP_TSB_THRESH) { tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, pmap_copy_tte); tlb_context_demap(dst_pmap); } else { for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL) pmap_copy_tte(src_pmap, dst_pmap, tp, va); } tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1); } vm_page_unlock_queues(); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); } void pmap_zero_page(vm_page_t m) { struct tte *tp; vm_offset_t va; vm_paddr_t pa; KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_zero_page: fake page")); PMAP_STATS_INC(pmap_nzero_page); pa = VM_PAGE_TO_PHYS(m); if (m->md.color == -1) { PMAP_STATS_INC(pmap_nzero_page_nc); aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); } else if (m->md.color == DCACHE_COLOR(pa)) { PMAP_STATS_INC(pmap_nzero_page_c); va = TLB_PHYS_TO_DIRECT(pa); cpu_block_zero((void *)va, PAGE_SIZE); } else { PMAP_STATS_INC(pmap_nzero_page_oc); PMAP_LOCK(kernel_pmap); va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); tp = tsb_kvtotte(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(va, TS_8K); cpu_block_zero((void *)va, PAGE_SIZE); tlb_page_demap(kernel_pmap, va); PMAP_UNLOCK(kernel_pmap); } } void pmap_zero_page_area(vm_page_t m, int off, int size) { struct tte *tp; vm_offset_t va; vm_paddr_t pa; KASSERT((m->flags & 
PG_FICTITIOUS) == 0, ("pmap_zero_page_area: fake page")); KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size")); PMAP_STATS_INC(pmap_nzero_page_area); pa = VM_PAGE_TO_PHYS(m); if (m->md.color == -1) { PMAP_STATS_INC(pmap_nzero_page_area_nc); aszero(ASI_PHYS_USE_EC, pa + off, size); } else if (m->md.color == DCACHE_COLOR(pa)) { PMAP_STATS_INC(pmap_nzero_page_area_c); va = TLB_PHYS_TO_DIRECT(pa); bzero((void *)(va + off), size); } else { PMAP_STATS_INC(pmap_nzero_page_area_oc); PMAP_LOCK(kernel_pmap); va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); tp = tsb_kvtotte(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(va, TS_8K); bzero((void *)(va + off), size); tlb_page_demap(kernel_pmap, va); PMAP_UNLOCK(kernel_pmap); } } void pmap_zero_page_idle(vm_page_t m) { struct tte *tp; vm_offset_t va; vm_paddr_t pa; KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_zero_page_idle: fake page")); PMAP_STATS_INC(pmap_nzero_page_idle); pa = VM_PAGE_TO_PHYS(m); if (m->md.color == -1) { PMAP_STATS_INC(pmap_nzero_page_idle_nc); aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); } else if (m->md.color == DCACHE_COLOR(pa)) { PMAP_STATS_INC(pmap_nzero_page_idle_c); va = TLB_PHYS_TO_DIRECT(pa); cpu_block_zero((void *)va, PAGE_SIZE); } else { PMAP_STATS_INC(pmap_nzero_page_idle_oc); va = pmap_idle_map + (m->md.color * PAGE_SIZE); tp = tsb_kvtotte(va); tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(va, TS_8K); cpu_block_zero((void *)va, PAGE_SIZE); tlb_page_demap(kernel_pmap, va); } } void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t vdst; vm_offset_t vsrc; vm_paddr_t pdst; vm_paddr_t psrc; struct tte *tp; KASSERT((mdst->flags & PG_FICTITIOUS) == 0, ("pmap_copy_page: fake dst page")); KASSERT((msrc->flags & PG_FICTITIOUS) == 0, ("pmap_copy_page: fake src page")); PMAP_STATS_INC(pmap_ncopy_page); pdst = VM_PAGE_TO_PHYS(mdst); psrc = VM_PAGE_TO_PHYS(msrc); if (msrc->md.color == -1 && mdst->md.color == -1) { PMAP_STATS_INC(pmap_ncopy_page_nc); ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE); } else if (msrc->md.color == DCACHE_COLOR(psrc) && mdst->md.color == DCACHE_COLOR(pdst)) { PMAP_STATS_INC(pmap_ncopy_page_c); vdst = TLB_PHYS_TO_DIRECT(pdst); vsrc = TLB_PHYS_TO_DIRECT(psrc); cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE); } else if (msrc->md.color == -1) { if (mdst->md.color == DCACHE_COLOR(pdst)) { PMAP_STATS_INC(pmap_ncopy_page_dc); vdst = TLB_PHYS_TO_DIRECT(pdst); ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, PAGE_SIZE); } else { PMAP_STATS_INC(pmap_ncopy_page_doc); PMAP_LOCK(kernel_pmap); vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE); tp = tsb_kvtotte(vdst); tp->tte_data = TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(vdst, TS_8K); ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, PAGE_SIZE); tlb_page_demap(kernel_pmap, vdst); PMAP_UNLOCK(kernel_pmap); } } else if (mdst->md.color == -1) { if (msrc->md.color == DCACHE_COLOR(psrc)) { PMAP_STATS_INC(pmap_ncopy_page_sc); vsrc = TLB_PHYS_TO_DIRECT(psrc); ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, PAGE_SIZE); } else { PMAP_STATS_INC(pmap_ncopy_page_soc); PMAP_LOCK(kernel_pmap); vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE); tp = tsb_kvtotte(vsrc); tp->tte_data = TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(vsrc, TS_8K); ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, PAGE_SIZE); tlb_page_demap(kernel_pmap, vsrc); PMAP_UNLOCK(kernel_pmap); } } else { PMAP_STATS_INC(pmap_ncopy_page_oc); 
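		/*
		 * At least one of the two pages is mapped at a virtual
		 * color that does not match its physical color, so both
		 * are remapped at their md.color through the kernel
		 * temporary maps (pmap_temp_map_1/2), block-copied, and
		 * the temporary mappings are demapped again.
		 */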
PMAP_LOCK(kernel_pmap); vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE); tp = tsb_kvtotte(vdst); tp->tte_data = TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(vdst, TS_8K); vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE); tp = tsb_kvtotte(vsrc); tp->tte_data = TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W; tp->tte_vpn = TV_VPN(vsrc, TS_8K); cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE); tlb_page_demap(kernel_pmap, vdst); tlb_page_demap(kernel_pmap, vsrc); PMAP_UNLOCK(kernel_pmap); } } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pm, vm_page_t m) { struct tte *tp; int loops; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (FALSE); loops = 0; TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if (TTE_GET_PMAP(tp) == pm) return (TRUE); if (++loops >= 16) break; } return (FALSE); } /* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove n the case of running down * an entire address space. Only works for the current pmap. */ void pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) { } /* * Returns TRUE if the given page has a managed mapping. */ boolean_t pmap_page_is_mapped(vm_page_t m) { struct tte *tp; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (FALSE); TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) != 0) return (TRUE); } return (FALSE); } /* * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_page_protect: fake page")); if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) pmap_clear_write(m); else pmap_remove_all(m); } } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. 
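 *
 * The implementation below rotates each examined TTE to the tail of the
 * page's tte_list and stops once more than four reference bits have
 * been cleared, so successive calls tend to sample different mappings
 * of a heavily shared page; the return value is the number of reference
 * bits actually cleared.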
*/ int pmap_ts_referenced(vm_page_t m) { struct tte *tpf; struct tte *tpn; struct tte *tp; u_long data; int count; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (0); count = 0; if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) { tpf = tp; do { tpn = TAILQ_NEXT(tp, tte_link); TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); if ((tp->tte_data & TD_PV) == 0) continue; data = atomic_clear_long(&tp->tte_data, TD_REF); if ((data & TD_REF) != 0 && ++count > 4) break; } while ((tp = tpn) != NULL && tp != tpf); } return (count); } boolean_t pmap_is_modified(vm_page_t m) { struct tte *tp; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return (FALSE); TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; if ((tp->tte_data & TD_W) != 0) return (TRUE); } return (FALSE); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { return (FALSE); } void pmap_clear_modify(vm_page_t m) { struct tte *tp; u_long data; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; data = atomic_clear_long(&tp->tte_data, TD_W); if ((data & TD_W) != 0) tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } void pmap_clear_reference(vm_page_t m) { struct tte *tp; u_long data; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) return; TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; data = atomic_clear_long(&tp->tte_data, TD_REF); if ((data & TD_REF) != 0) tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } void pmap_clear_write(vm_page_t m) { struct tte *tp; u_long data; mtx_assert(&vm_page_queue_mtx, MA_OWNED); if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || (m->flags & PG_WRITEABLE) == 0) return; TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { if ((tp->tte_data & TD_PV) == 0) continue; data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W); if ((data & TD_W) != 0) { vm_page_dirty(m); tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); } } vm_page_flag_clear(m, PG_WRITEABLE); } int pmap_mincore(pmap_t pm, vm_offset_t addr) { /* TODO; */ return (0); } /* * Activate a user pmap. The pmap must be activated before its address space * can be accessed in any way. 
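 *
 * In outline, activation takes a fresh TLB context number from the
 * per-CPU counter; when the counter reaches tlb_ctx_max the user TLB is
 * flushed and numbering restarts at tlb_ctx_min.  The chosen context is
 * recorded in pm_context[] for this CPU, the TSB base is written to the
 * D- and I-MMU TSB registers and the context number to the primary
 * context register, all under sched_lock.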
*/ void pmap_activate(struct thread *td) { struct vmspace *vm; struct pmap *pm; int context; vm = td->td_proc->p_vmspace; pm = vmspace_pmap(vm); mtx_lock_spin(&sched_lock); context = PCPU_GET(tlb_ctx); if (context == PCPU_GET(tlb_ctx_max)) { tlb_flush_user(); context = PCPU_GET(tlb_ctx_min); } PCPU_SET(tlb_ctx, context + 1); pm->pm_context[PCPU_GET(cpuid)] = context; pm->pm_active |= PCPU_GET(cpumask); PCPU_SET(pmap, pm); stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb); stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb); stxa(AA_DMMU_PCXR, ASI_DMMU, context); membar(Sync); mtx_unlock_spin(&sched_lock); } vm_offset_t pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) { return (va); } Index: stable/6/sys/vm/pmap.h =================================================================== --- stable/6/sys/vm/pmap.h (revision 173366) +++ stable/6/sys/vm/pmap.h (revision 173367) @@ -1,141 +1,141 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.h 8.1 (Berkeley) 6/11/93 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Author: Avadis Tevanian, Jr. * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
* * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * * $FreeBSD$ */ /* * Machine address mapping definitions -- machine-independent * section. [For machine-dependent section, see "machine/pmap.h".] */ #ifndef _PMAP_VM_ #define _PMAP_VM_ /* * Each machine dependent implementation is expected to * keep certain statistics. They may do this anyway they * so choose, but are expected to return the statistics * in the following structure. */ struct pmap_statistics { long resident_count; /* # of pages mapped (total) */ long wired_count; /* # of pages wired */ }; typedef struct pmap_statistics *pmap_statistics_t; #include #ifdef _KERNEL struct proc; struct thread; /* * Updates to kernel_vm_end are synchronized by the kernel_map's system mutex. */ extern vm_offset_t kernel_vm_end; extern int pmap_pagedaemon_waken; void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t); void pmap_clear_modify(vm_page_t m); void pmap_clear_reference(vm_page_t m); void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); void pmap_copy_page(vm_page_t, vm_page_t); void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); -vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, - vm_prot_t prot, vm_page_t mpte); +void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, + vm_prot_t prot); void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot); vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va); vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot); void pmap_growkernel(vm_offset_t); void pmap_init(void); boolean_t pmap_is_modified(vm_page_t m); boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va); boolean_t pmap_ts_referenced(vm_page_t m); vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size); boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m); void pmap_page_init(vm_page_t m); void pmap_page_protect(vm_page_t m, vm_prot_t prot); void pmap_pinit(pmap_t); void pmap_pinit0(pmap_t); void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); void pmap_qenter(vm_offset_t, vm_page_t *, int); void pmap_qremove(vm_offset_t, int); void pmap_release(pmap_t); void pmap_remove(pmap_t, vm_offset_t, vm_offset_t); void pmap_remove_all(vm_page_t m); void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t); void pmap_zero_page(vm_page_t); void pmap_zero_page_area(vm_page_t, int off, int size); void pmap_zero_page_idle(vm_page_t); int pmap_mincore(pmap_t pmap, vm_offset_t addr); void pmap_activate(struct thread *td); vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size); void pmap_init2(void); #define pmap_resident_count(pm) ((pm)->pm_stats.resident_count) #define pmap_wired_count(pm) ((pm)->pm_stats.wired_count) #endif /* _KERNEL */ #endif /* _PMAP_VM_ */ Index: stable/6/sys/vm/vm_fault.c =================================================================== --- stable/6/sys/vm/vm_fault.c (revision 173366) +++ stable/6/sys/vm/vm_fault.c (revision 173367) @@ -1,1350 +1,1348 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. 
* Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Page fault handling module. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX Temporary for VFS_LOCK_GIANT() */ #define PFBAK 4 #define PFFOR 4 #define PAGEORDER_SIZE (PFBAK+PFFOR) static int prefault_pageorder[] = { -1 * PAGE_SIZE, 1 * PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE, -3 * PAGE_SIZE, 3 * PAGE_SIZE, -4 * PAGE_SIZE, 4 * PAGE_SIZE }; static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *); static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t); #define VM_FAULT_READ_AHEAD 8 #define VM_FAULT_READ_BEHIND 7 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1) struct faultstate { vm_page_t m; vm_object_t object; vm_pindex_t pindex; vm_page_t first_m; vm_object_t first_object; vm_pindex_t first_pindex; vm_map_t map; vm_map_entry_t entry; int lookup_still_valid; struct vnode *vp; }; static __inline void release_page(struct faultstate *fs) { vm_page_lock_queues(); vm_page_wakeup(fs->m); vm_page_deactivate(fs->m); vm_page_unlock_queues(); fs->m = NULL; } static __inline void unlock_map(struct faultstate *fs) { if (fs->lookup_still_valid) { vm_map_lookup_done(fs->map, fs->entry); fs->lookup_still_valid = FALSE; } } static void unlock_and_deallocate(struct faultstate *fs) { boolean_t firstobjneedgiant; vm_object_pip_wakeup(fs->object); VM_OBJECT_UNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_LOCK(fs->first_object); vm_page_lock_queues(); vm_page_free(fs->first_m); vm_page_unlock_queues(); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_UNLOCK(fs->first_object); fs->first_m = NULL; } firstobjneedgiant = (fs->first_object->flags & OBJ_NEEDGIANT) != 0; vm_object_deallocate(fs->first_object); unlock_map(fs); if (fs->vp != NULL) { int vfslocked; vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount); vput(fs->vp); fs->vp = NULL; VFS_UNLOCK_GIANT(vfslocked); } if (firstobjneedgiant) VM_UNLOCK_GIANT(); } /* * TRYPAGER - used by vm_fault to calculate whether the pager for the * current object *might* contain the page. * * default objects are zero-fill, there is no real pager. */ #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) /* * vm_fault: * * Handle a page fault occurring at the given address, * requiring the given permissions, in the map specified. * If successful, the page is inserted into the * associated physical map. * * NOTE: the given address should be truncated to the * proper page address. * * KERN_SUCCESS is returned if the page fault is handled; otherwise, * a standard error specifying why the fault is fatal is returned. * * * The map in question must be referenced, and remains so. * Caller may hold no locks. */ int vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) { vm_prot_t prot; int is_first_object_locked, result; boolean_t growstack, wired; int map_generation; vm_object_t next_object; vm_page_t marray[VM_FAULT_READ]; int hardfault; int faultcount; struct faultstate fs; hardfault = 0; growstack = TRUE; atomic_add_int(&cnt.v_vm_faults, 1); RetryFault:; /* * Find the backing store object and offset into it to begin the * search. 
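	 * If the lookup fails with KERN_INVALID_ADDRESS on a user map, one
	 * vm_map_growstack() attempt is made and the whole fault is retried
	 * from the top, so automatic stack growth is handled here rather
	 * than by the caller; only if that attempt also fails is an error
	 * returned.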
*/ fs.map = map; result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) { if (result != KERN_PROTECTION_FAILURE || (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) { if (growstack && result == KERN_INVALID_ADDRESS && map != kernel_map && curproc != NULL) { result = vm_map_growstack(curproc, vaddr); if (result != KERN_SUCCESS) return (KERN_FAILURE); growstack = FALSE; goto RetryFault; } return (result); } /* * If we are user-wiring a r/w segment, and it is COW, then * we need to do the COW operation. Note that we don't COW * currently RO sections now, because it is NOT desirable * to COW .text. We simply keep .text from ever being COW'ed * and take the heat that one cannot debug wired .text sections. */ result = vm_map_lookup(&fs.map, vaddr, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) return (result); /* * If we don't COW now, on a user wire, the user will never * be able to write to the mapping. If we don't make this * restriction, the bookkeeping would be nearly impossible. * * XXX The following assignment modifies the map without * holding a write lock on it. */ if ((fs.entry->protection & VM_PROT_WRITE) == 0) fs.entry->max_protection &= ~VM_PROT_WRITE; } map_generation = fs.map->timestamp; if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { panic("vm_fault: fault on nofault entry, addr: %lx", (u_long)vaddr); } /* * Make a reference to this object to prevent its disposal while we * are messing with it. Once we have the reference, the map is free * to be diddled. Since objects reference their shadows (and copies), * they will stay around as well. * * Bump the paging-in-progress count to prevent size changes (e.g. * truncation operations) during I/O. This must be done after * obtaining the vnode lock in order to avoid possible deadlocks. * * XXX vnode_pager_lock() can block without releasing the map lock. */ if (fs.first_object->flags & OBJ_NEEDGIANT) mtx_lock(&Giant); VM_OBJECT_LOCK(fs.first_object); vm_object_reference_locked(fs.first_object); fs.vp = vnode_pager_lock(fs.first_object); KASSERT(fs.vp == NULL || !fs.map->system_map, ("vm_fault: vnode-backed object mapped by system map")); KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 || !fs.map->system_map, ("vm_fault: Object requiring giant mapped by system map")); if (fs.first_object->flags & OBJ_NEEDGIANT && debug_mpsafevm) mtx_unlock(&Giant); vm_object_pip_add(fs.first_object, 1); fs.lookup_still_valid = TRUE; if (wired) fault_type = prot; fs.first_m = NULL; /* * Search for the page at object/offset. */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; while (TRUE) { /* * If the object is dead, we stop here */ if (fs.object->flags & OBJ_DEAD) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * See if page is resident */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (fs.m != NULL) { int queue; /* * check for page-based copy on write. * We check fs.object == fs.first_object so * as to ensure the legacy COW mechanism is * used when the page in question is part of * a shadow object. Otherwise, vm_page_cowfault() * removes the page from the backing object, * which is not what we want. 
*/ vm_page_lock_queues(); if ((fs.m->cow) && (fault_type & VM_PROT_WRITE) && (fs.object == fs.first_object)) { vm_page_cowfault(fs.m); vm_page_unlock_queues(); unlock_and_deallocate(&fs); goto RetryFault; } /* * Wait/Retry if the page is busy. We have to do this * if the page is busy via either PG_BUSY or * vm_page_t->busy because the vm_pager may be using * vm_page_t->busy for pageouts ( and even pageins if * it is the vnode pager ), and we could end up trying * to pagein and pageout the same page simultaneously. * * We can theoretically allow the busy case on a read * fault if the page is marked valid, but since such * pages are typically already pmap'd, putting that * special case in might be more effort then it is * worth. We cannot under any circumstances mess * around with a vm_page_t->busy page except, perhaps, * to pmap it. */ if ((fs.m->flags & PG_BUSY) || fs.m->busy) { vm_page_unlock_queues(); VM_OBJECT_UNLOCK(fs.object); if (fs.object != fs.first_object) { VM_OBJECT_LOCK(fs.first_object); vm_page_lock_queues(); vm_page_free(fs.first_m); vm_page_unlock_queues(); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_UNLOCK(fs.first_object); fs.first_m = NULL; } unlock_map(&fs); if (fs.vp != NULL) { int vfslck; vfslck = VFS_LOCK_GIANT(fs.vp->v_mount); vput(fs.vp); fs.vp = NULL; VFS_UNLOCK_GIANT(vfslck); } VM_OBJECT_LOCK(fs.object); if (fs.m == vm_page_lookup(fs.object, fs.pindex)) { vm_page_lock_queues(); if (!vm_page_sleep_if_busy(fs.m, TRUE, "vmpfw")) vm_page_unlock_queues(); } vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); atomic_add_int(&cnt.v_intrans, 1); if (fs.first_object->flags & OBJ_NEEDGIANT) VM_UNLOCK_GIANT(); vm_object_deallocate(fs.first_object); goto RetryFault; } queue = fs.m->queue; vm_pageq_remove_nowakeup(fs.m); if ((queue - fs.m->pc) == PQ_CACHE) { cnt.v_reactivated++; if (vm_page_count_severe()) { vm_page_activate(fs.m); vm_page_unlock_queues(); unlock_and_deallocate(&fs); VM_WAITPFAULT; goto RetryFault; } } /* * Mark page busy for other processes, and the * pagedaemon. If it still isn't completely valid * (readable), jump to readrest, else break-out ( we * found the page ). */ vm_page_busy(fs.m); vm_page_unlock_queues(); if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) && fs.m->object != kernel_object && fs.m->object != kmem_object) { goto readrest; } break; } /* * Page is not resident, If this is the search termination * or the pager might contain the page, allocate a new page. */ if (TRYPAGER || fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * Allocate a new page for this object/offset pair. */ fs.m = NULL; if (!vm_page_count_severe()) { fs.m = vm_page_alloc(fs.object, fs.pindex, (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO); } if (fs.m == NULL) { unlock_and_deallocate(&fs); VM_WAITPFAULT; goto RetryFault; } } readrest: /* * We have found a valid page or we have allocated a new page. * The page thus may not be valid or may not be entirely * valid. * * Attempt to fault-in the page if there is a chance that the * pager has it, and potentially fault in additional pages * at the same time. 
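		 * The cluster size is bounded by VM_FAULT_READ_BEHIND (7)
		 * and VM_FAULT_READ_AHEAD (8) pages and clamped to the map
		 * entry, so at most VM_FAULT_READ pages are requested from
		 * the pager in one call; MAP_ENTRY_BEHAV_RANDOM disables
		 * the read-behind/read-ahead entirely, and detected
		 * sequential access shifts the whole window ahead of the
		 * faulting address.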
*/ if (TRYPAGER) { int rv; int reqpage; int ahead, behind; u_char behavior = vm_map_entry_behavior(fs.entry); if (behavior == MAP_ENTRY_BEHAV_RANDOM) { ahead = 0; behind = 0; } else { behind = (vaddr - fs.entry->start) >> PAGE_SHIFT; if (behind > VM_FAULT_READ_BEHIND) behind = VM_FAULT_READ_BEHIND; ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1; if (ahead > VM_FAULT_READ_AHEAD) ahead = VM_FAULT_READ_AHEAD; } is_first_object_locked = FALSE; if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL || (behavior != MAP_ENTRY_BEHAV_RANDOM && fs.pindex >= fs.entry->lastr && fs.pindex < fs.entry->lastr + VM_FAULT_READ)) && (fs.first_object == fs.object || (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) && fs.first_object->type != OBJT_DEVICE) { vm_pindex_t firstpindex, tmppindex; if (fs.first_pindex < 2 * VM_FAULT_READ) firstpindex = 0; else firstpindex = fs.first_pindex - 2 * VM_FAULT_READ; vm_page_lock_queues(); /* * note: partially valid pages cannot be * included in the lookahead - NFS piecemeal * writes will barf on it badly. */ for (tmppindex = fs.first_pindex - 1; tmppindex >= firstpindex; --tmppindex) { vm_page_t mt; mt = vm_page_lookup(fs.first_object, tmppindex); if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL)) break; if (mt->busy || (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) || mt->hold_count || mt->wire_count) continue; pmap_remove_all(mt); if (mt->dirty) { vm_page_deactivate(mt); } else { vm_page_cache(mt); } } vm_page_unlock_queues(); ahead += behind; behind = 0; } if (is_first_object_locked) VM_OBJECT_UNLOCK(fs.first_object); /* * now we find out if any other pages should be paged * in at this time this routine checks to see if the * pages surrounding this fault reside in the same * object as the page for this fault. If they do, * then they are faulted in also into the object. The * array "marray" returned contains an array of * vm_page_t structs where one of them is the * vm_page_t passed to the routine. The reqpage * return value is the index into the marray for the * vm_page_t passed to the routine. * * fs.m plus the additional pages are PG_BUSY'd. * * XXX vm_fault_additional_pages() can block * without releasing the map lock. */ faultcount = vm_fault_additional_pages( fs.m, behind, ahead, marray, &reqpage); /* * update lastr imperfectly (we do not know how much * getpages will actually read), but good enough. * * XXX The following assignment modifies the map * without holding a write lock on it. */ fs.entry->lastr = fs.pindex + faultcount - behind; /* * Call the pager to retrieve the data, if any, after * releasing the lock on the map. We hold a ref on * fs.object and the pages are PG_BUSY'd. */ unlock_map(&fs); rv = faultcount ? vm_pager_get_pages(fs.object, marray, faultcount, reqpage) : VM_PAGER_FAIL; if (rv == VM_PAGER_OK) { /* * Found the page. Leave it busy while we play * with it. */ /* * Relookup in case pager changed page. Pager * is responsible for disposition of old page * if moved. */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (!fs.m) { unlock_and_deallocate(&fs); goto RetryFault; } hardfault++; break; /* break to PAGE HAS BEEN FOUND */ } /* * Remove the bogus page (which does not exist at this * object/offset); before doing so, we must get back * our object lock to preserve our invariant. * * Also wake up any other process that may want to bring * in this page. 
* * If this is the top-level object, we must leave the * busy page to prevent another process from rushing * past us, and inserting the page in that object at * the same time that we are. */ if (rv == VM_PAGER_ERROR) printf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm); /* * Data outside the range of the pager or an I/O error */ /* * XXX - the check for kernel_map is a kludge to work * around having the machine panic on a kernel space * fault w/ I/O error. */ if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); fs.m = NULL; unlock_and_deallocate(&fs); return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE); } if (fs.object != fs.first_object) { vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); fs.m = NULL; /* * XXX - we cannot just fall out at this * point, m has been freed and is invalid! */ } } /* * We get here if the object has default pager (or unwiring) * or the pager doesn't have the page. */ if (fs.object == fs.first_object) fs.first_m = fs.m; /* * Move on to the next object. Lock the next object before * unlocking the current one. */ fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset); next_object = fs.object->backing_object; if (next_object == NULL) { /* * If there's no object left, fill the page in the top * object with zeros. */ if (fs.object != fs.first_object) { vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; VM_OBJECT_LOCK(fs.object); } fs.first_m = NULL; /* * Zero the page if necessary and mark it valid. */ if ((fs.m->flags & PG_ZERO) == 0) { pmap_zero_page(fs.m); } else { atomic_add_int(&cnt.v_ozfod, 1); } atomic_add_int(&cnt.v_zfod, 1); fs.m->valid = VM_PAGE_BITS_ALL; break; /* break to PAGE HAS BEEN FOUND */ } else { KASSERT(fs.object != next_object, ("object loop %p", next_object)); VM_OBJECT_LOCK(next_object); vm_object_pip_add(next_object, 1); if (fs.object != fs.first_object) vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = next_object; } } KASSERT((fs.m->flags & PG_BUSY) != 0, ("vm_fault: not busy after main loop")); /* * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock * is held.] */ /* * If the page is being written, but isn't already owned by the * top-level object, we have to copy it into a new page owned by the * top-level object. */ if (fs.object != fs.first_object) { /* * We only really need to copy if we want to write it. */ if (fault_type & VM_PROT_WRITE) { /* * This allows pages to be virtually copied from a * backing_object into the first_object, where the * backing object has no other refs to it, and cannot * gain any more refs. Instead of a bcopy, we just * move the page from the backing object to the * first object. Note that we must mark the page * dirty in the first object so that it will go out * to swap when needed. 
*/ is_first_object_locked = FALSE; if ( /* * Only one shadow object */ (fs.object->shadow_count == 1) && /* * No COW refs, except us */ (fs.object->ref_count == 1) && /* * No one else can look this object up */ (fs.object->handle == NULL) && /* * No other ways to look the object up */ ((fs.object->type == OBJT_DEFAULT) || (fs.object->type == OBJT_SWAP)) && (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) && /* * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { vm_page_lock_queues(); /* * get rid of the unnecessary page */ pmap_remove_all(fs.first_m); vm_page_free(fs.first_m); /* * grab the page and put it into the * process'es object. The page is * automatically made dirty. */ vm_page_rename(fs.m, fs.first_object, fs.first_pindex); vm_page_busy(fs.m); vm_page_unlock_queues(); fs.first_m = fs.m; fs.m = NULL; atomic_add_int(&cnt.v_cow_optim, 1); } else { /* * Oh, well, lets copy it. */ pmap_copy_page(fs.m, fs.first_m); fs.first_m->valid = VM_PAGE_BITS_ALL; } if (fs.m) { /* * We no longer need the old page or object. */ release_page(&fs); } /* * fs.object != fs.first_object due to above * conditional */ vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); /* * Only use the new page below... */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; if (!is_first_object_locked) VM_OBJECT_LOCK(fs.object); atomic_add_int(&cnt.v_cow_faults, 1); } else { prot &= ~VM_PROT_WRITE; } } /* * We must verify that the maps have not changed since our last * lookup. */ if (!fs.lookup_still_valid) { vm_object_t retry_object; vm_pindex_t retry_pindex; vm_prot_t retry_prot; if (!vm_map_trylock_read(fs.map)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } fs.lookup_still_valid = TRUE; if (fs.map->timestamp != map_generation) { result = vm_map_lookup_locked(&fs.map, vaddr, fault_type, &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired); /* * If we don't need the page any longer, put it on the inactive * list (the easiest thing to do here). If no one needs it, * pageout will grab it eventually. */ if (result != KERN_SUCCESS) { release_page(&fs); unlock_and_deallocate(&fs); /* * If retry of map lookup would have blocked then * retry fault from start. */ if (result == KERN_FAILURE) goto RetryFault; return (result); } if ((retry_object != fs.first_object) || (retry_pindex != fs.first_pindex)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } /* * Check whether the protection has changed or the object has * been copied while we left the map unlocked. Changing from * read to write permission is OK - we leave the page * write-protected, and catch the write fault. Changing from * write to read permission means that we can't mark the page * write-enabled after all. */ prot &= retry_prot; } } if (prot & VM_PROT_WRITE) { vm_page_lock_queues(); vm_page_flag_set(fs.m, PG_WRITEABLE); vm_object_set_writeable_dirty(fs.m->object); /* * If the fault is a write, we know that this page is being * written NOW so dirty it explicitly to save on * pmap_is_modified() calls later. * * If this is a NOSYNC mmap we do not want to set PG_NOSYNC * if the page is already dirty to prevent data written with * the expectation of being synced from not being synced. * Likewise if this entry does not request NOSYNC then make * sure the page isn't marked NOSYNC. Applications sharing * data should use the same flags to avoid ping ponging. 
* * Also tell the backing pager, if any, that it should remove * any swap backing since the page is now dirty. */ if (fs.entry->eflags & MAP_ENTRY_NOSYNC) { if (fs.m->dirty == 0) vm_page_flag_set(fs.m, PG_NOSYNC); } else { vm_page_flag_clear(fs.m, PG_NOSYNC); } vm_page_unlock_queues(); if (fault_flags & VM_FAULT_DIRTY) { vm_page_dirty(fs.m); vm_pager_page_unswapped(fs.m); } } /* * Page had better still be busy */ KASSERT(fs.m->flags & PG_BUSY, ("vm_fault: page %p not busy!", fs.m)); /* * Sanity check: page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. */ if (fs.m->valid != VM_PAGE_BITS_ALL) { vm_page_zero_invalid(fs.m, TRUE); printf("Warning: page %p partially invalid on fault\n", fs.m); } VM_OBJECT_UNLOCK(fs.object); /* * Put this page into the physical map. We had to do the unlock above * because pmap_enter() may sleep. We don't put the page * back on the active queue until later so that the pageout daemon * won't find it (yet). */ pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); } VM_OBJECT_LOCK(fs.object); vm_page_lock_queues(); vm_page_flag_set(fs.m, PG_REFERENCED); /* * If the page is not wired down, then put it where the pageout daemon * can find it. */ if (fault_flags & VM_FAULT_WIRE_MASK) { if (wired) vm_page_wire(fs.m); else vm_page_unwire(fs.m, 1); } else { vm_page_activate(fs.m); } vm_page_wakeup(fs.m); vm_page_unlock_queues(); /* * Unlock everything, and return */ unlock_and_deallocate(&fs); PROC_LOCK(curproc); if ((curproc->p_sflag & PS_INMEM) && curproc->p_stats) { if (hardfault) { curproc->p_stats->p_ru.ru_majflt++; } else { curproc->p_stats->p_ru.ru_minflt++; } } PROC_UNLOCK(curproc); return (KERN_SUCCESS); } /* * vm_fault_prefault provides a quick way of clustering * pagefaults into a processes address space. It is a "cousin" * of vm_map_pmap_enter, except it runs at page fault time instead * of mmap time. 
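 *
 * With PFBAK == PFFOR == 4, the prefault_pageorder[] table probes the
 * pages at addra -1, +1, -2, +2, -3, +3, -4 and +4 pages, skipping
 * addresses outside [starta, entry->end) and those the pmap reports as
 * not prefaultable; resident, fully valid, unbusied pages found there
 * are entered with pmap_enter_quick() under the entry's protection.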
*/ static void vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry) { int i; vm_offset_t addr, starta; vm_pindex_t pindex; - vm_page_t m, mpte; + vm_page_t m; vm_object_t object; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) return; object = entry->object.vm_object; starta = addra - PFBAK * PAGE_SIZE; if (starta < entry->start) { starta = entry->start; } else if (starta > addra) { starta = 0; } - mpte = NULL; for (i = 0; i < PAGEORDER_SIZE; i++) { vm_object_t backing_object, lobject; addr = addra + prefault_pageorder[i]; if (addr > addra + (PFFOR * PAGE_SIZE)) addr = 0; if (addr < starta || addr >= entry->end) continue; if (!pmap_is_prefaultable(pmap, addr)) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = object; VM_OBJECT_LOCK(lobject); while ((m = vm_page_lookup(lobject, pindex)) == NULL && lobject->type == OBJT_DEFAULT && (backing_object = lobject->backing_object) != NULL) { if (lobject->backing_object_offset & PAGE_MASK) break; pindex += lobject->backing_object_offset >> PAGE_SHIFT; VM_OBJECT_LOCK(backing_object); VM_OBJECT_UNLOCK(lobject); lobject = backing_object; } /* * give-up when a page is not in memory */ if (m == NULL) { VM_OBJECT_UNLOCK(lobject); break; } if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (m->busy == 0) && (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { vm_page_lock_queues(); if ((m->queue - m->pc) == PQ_CACHE) vm_page_deactivate(m); - mpte = pmap_enter_quick(pmap, addr, m, - entry->protection, mpte); + pmap_enter_quick(pmap, addr, m, entry->protection); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(lobject); } } /* * vm_fault_quick: * * Ensure that the requested virtual address, which may be in userland, * is valid. Fault-in the page if necessary. Return -1 on failure. */ int vm_fault_quick(caddr_t v, int prot) { int r; if (prot & VM_PROT_WRITE) r = subyte(v, fubyte(v)); else r = fubyte(v); return(r); } /* * vm_fault_wire: * * Wire down a range of virtual addresses in a map. */ int vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t user_wire, boolean_t fictitious) { vm_offset_t va; int rv; /* * We simulate a fault to get the page and enter it in the physical * map. For user wiring, we only ask for read access on currently * read-only sections. */ for (va = start; va < end; va += PAGE_SIZE) { rv = vm_fault(map, va, user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE, user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING); if (rv) { if (va != start) vm_fault_unwire(map, start, va, fictitious); return (rv); } } return (KERN_SUCCESS); } /* * vm_fault_unwire: * * Unwire a range of virtual addresses in a map. */ void vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t fictitious) { vm_paddr_t pa; vm_offset_t va; pmap_t pmap; pmap = vm_map_pmap(map); /* * Since the pages are wired down, we must be able to get their * mappings from the physical map system. */ for (va = start; va < end; va += PAGE_SIZE) { pa = pmap_extract(pmap, va); if (pa != 0) { pmap_change_wiring(pmap, va, FALSE); if (!fictitious) { vm_page_lock_queues(); vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1); vm_page_unlock_queues(); } } } } /* * Routine: * vm_fault_copy_entry * Function: * Copy all of the pages from a wired-down map entry to another. * * In/out conditions: * The source and destination maps must be locked for write. * The source map entry must be wired down (or be a sharing map * entry corresponding to a main map entry that is wired down). 
*/ void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry) vm_map_t dst_map; vm_map_t src_map; vm_map_entry_t dst_entry; vm_map_entry_t src_entry; { vm_object_t backing_object, dst_object, object; vm_object_t src_object; vm_ooffset_t dst_offset; vm_ooffset_t src_offset; vm_pindex_t pindex; vm_prot_t prot; vm_offset_t vaddr; vm_page_t dst_m; vm_page_t src_m; #ifdef lint src_map++; #endif /* lint */ src_object = src_entry->object.vm_object; src_offset = src_entry->offset; /* * Create the top-level object for the destination entry. (Doesn't * actually shadow anything - we copy the pages directly.) */ dst_object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(dst_entry->end - dst_entry->start)); VM_OBJECT_LOCK(dst_object); dst_entry->object.vm_object = dst_object; dst_entry->offset = 0; prot = dst_entry->max_protection; /* * Loop through all of the pages in the entry's range, copying each * one from the source object (it should be there) to the destination * object. */ for (vaddr = dst_entry->start, dst_offset = 0; vaddr < dst_entry->end; vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { /* * Allocate a page in the destination object */ do { dst_m = vm_page_alloc(dst_object, OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL); if (dst_m == NULL) { VM_OBJECT_UNLOCK(dst_object); VM_WAIT; VM_OBJECT_LOCK(dst_object); } } while (dst_m == NULL); /* * Find the page in the source object, and copy it in. * (Because the source is wired down, the page will be in * memory.) */ VM_OBJECT_LOCK(src_object); object = src_object; pindex = 0; while ((src_m = vm_page_lookup(object, pindex + OFF_TO_IDX(dst_offset + src_offset))) == NULL && (src_entry->protection & VM_PROT_WRITE) == 0 && (backing_object = object->backing_object) != NULL) { /* * Allow fallback to backing objects if we are reading. */ VM_OBJECT_LOCK(backing_object); pindex += OFF_TO_IDX(object->backing_object_offset); VM_OBJECT_UNLOCK(object); object = backing_object; } if (src_m == NULL) panic("vm_fault_copy_wired: page missing"); pmap_copy_page(src_m, dst_m); VM_OBJECT_UNLOCK(object); dst_m->valid = VM_PAGE_BITS_ALL; VM_OBJECT_UNLOCK(dst_object); /* * Enter it in the pmap... */ pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE); VM_OBJECT_LOCK(dst_object); vm_page_lock_queues(); if ((prot & VM_PROT_WRITE) != 0) vm_page_flag_set(dst_m, PG_WRITEABLE); /* * Mark it no longer busy, and put it on the active list. */ vm_page_activate(dst_m); vm_page_wakeup(dst_m); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(dst_object); } /* * This routine checks around the requested page for other pages that * might be able to be faulted in. This routine brackets the viable * pages for the pages to be paged in. * * Inputs: * m, rbehind, rahead * * Outputs: * marray (array of vm_page_t), reqpage (index of requested page) * * Return value: * number of pages in marray * * This routine can't block. 
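 *
 * In outline: rbehind and rahead are first clamped to what the pager
 * reports via vm_pager_has_page(); marray[] is then filled with newly
 * allocated pages covering up to rbehind pages immediately before and
 * rahead pages immediately after the requested page, stopping early at
 * any page that is already resident, and *reqpage is set to the index
 * of the originally requested page within marray[].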
*/ static int vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage) vm_page_t m; int rbehind; int rahead; vm_page_t *marray; int *reqpage; { int i,j; vm_object_t object; vm_pindex_t pindex, startpindex, endpindex, tpindex; vm_page_t rtm; int cbehind, cahead; VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); object = m->object; pindex = m->pindex; /* * if the requested page is not available, then give up now */ if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { return 0; } if ((cbehind == 0) && (cahead == 0)) { *reqpage = 0; marray[0] = m; return 1; } if (rahead > cahead) { rahead = cahead; } if (rbehind > cbehind) { rbehind = cbehind; } /* * scan backward for the read behind pages -- in memory */ if (pindex > 0) { if (rbehind > pindex) { rbehind = pindex; startpindex = 0; } else { startpindex = pindex - rbehind; } for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) { if (vm_page_lookup(object, tpindex)) { startpindex = tpindex + 1; break; } if (tpindex == 0) break; } /* tpindex is unsigned; beware of numeric underflow. */ for (i = 0, tpindex = pindex - 1; tpindex >= startpindex && tpindex < pindex; i++, tpindex--) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL); if (rtm == NULL) { /* * Shift the allocated pages to the * beginning of the array. */ for (j = 0; j < i; j++) { marray[j] = marray[j + tpindex + 1 - startpindex]; } break; } marray[tpindex - startpindex] = rtm; } } else { startpindex = 0; i = 0; } marray[i] = m; /* page offset of the required page */ *reqpage = i; tpindex = pindex + 1; i++; /* * scan forward for the read ahead pages */ endpindex = tpindex + rahead; if (endpindex > object->size) endpindex = object->size; for (; tpindex < endpindex; i++, tpindex++) { if (vm_page_lookup(object, tpindex)) { break; } rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL); if (rtm == NULL) { break; } marray[i] = rtm; } /* return number of pages */ return i; }