Index: amd64/amd64/pmap.c
===================================================================
--- amd64/amd64/pmap.c
+++ amd64/amd64/pmap.c
@@ -584,6 +584,12 @@
  */
 static caddr_t crashdumpmap;
 
+/*
+ * Internal flags for pmap_enter()'s helper functions.
+ */
+#define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
+#define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
+
 static void	free_pv_chunk(struct pv_chunk *pc);
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
@@ -593,8 +599,8 @@
 		    struct rwlock **lockp);
 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 		    struct rwlock **lockp);
-static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
-		    struct rwlock **lockp);
+static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
+		    u_int flags, struct rwlock **lockp);
 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 		    struct rwlock **lockp);
 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
@@ -607,8 +613,10 @@
 		    vm_offset_t va, struct rwlock **lockp);
 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
 		    vm_offset_t va);
-static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
-		    vm_prot_t prot, struct rwlock **lockp);
+static bool	pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
+		    vm_prot_t prot, struct rwlock **lockp);
+static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
+		    u_int flags, vm_page_t m, struct rwlock **lockp);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
@@ -629,6 +637,9 @@
 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 static void	pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
     struct spglist *free);
+static bool	pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+		    pd_entry_t *pde, struct spglist *free,
+		    struct rwlock **lockp);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
     vm_page_t m, struct rwlock **lockp);
 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
@@ -3392,27 +3403,30 @@
 }
 
 /*
- * Conditionally create the PV entry for a 2MB page mapping if the required
- * memory can be allocated without resorting to reclamation.
+ * Create the PV entry for a 2MB page mapping. Always returns true unless the
+ * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
+ * false if the PV entry cannot be allocated without resorting to reclamation.
  */
-static boolean_t
-pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+static bool
+pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
     struct rwlock **lockp)
 {
 	struct md_page *pvh;
 	pv_entry_t pv;
+	vm_paddr_t pa;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	/* Pass NULL instead of the lock pointer to disable reclamation. */
-	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
-		pv->pv_va = va;
-		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
-		pvh = pa_to_pvh(pa);
-		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
-		pvh->pv_gen++;
-		return (TRUE);
-	} else
-		return (FALSE);
+	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
+	    NULL : lockp)) == NULL)
+		return (false);
+	pv->pv_va = va;
+	pa = pde & PG_PS_FRAME;
+	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+	pvh = pa_to_pvh(pa);
+	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+	pvh->pv_gen++;
+	return (true);
 }
 
 /*
@@ -3736,6 +3750,44 @@
 }
 
 /*
+ * Removes the specified range of addresses from the page table page.
+ */
+static bool
+pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+    pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
+{
+	pt_entry_t PG_G, *pte;
+	vm_offset_t va;
+	bool anyvalid;
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PG_G = pmap_global_bit(pmap);
+	anyvalid = false;
+	va = eva;
+	for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
+	    sva += PAGE_SIZE) {
+		if (*pte == 0) {
+			if (va != eva) {
+				pmap_invalidate_range(pmap, va, sva);
+				va = eva;
+			}
+			continue;
+		}
+		if ((*pte & PG_G) == 0)
+			anyvalid = true;
+		else if (va == eva)
+			va = sva;
+		if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
+			sva += PAGE_SIZE;
+			break;
+		}
+	}
+	if (va != eva)
+		pmap_invalidate_range(pmap, va, sva);
+	return (anyvalid);
+}
+
+/*
  * Remove the given range of addresses from the specified map.
  *
  * It is assumed that the start and end are properly
@@ -3745,11 +3797,11 @@
 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
 	struct rwlock *lock;
-	vm_offset_t va, va_next;
+	vm_offset_t va_next;
 	pml4_entry_t *pml4e;
 	pdp_entry_t *pdpe;
 	pd_entry_t ptpaddr, *pde;
-	pt_entry_t *pte, PG_G, PG_V;
+	pt_entry_t PG_G, PG_V;
 	struct spglist free;
 	int anyvalid;
 
@@ -3852,28 +3904,8 @@
 		if (va_next > eva)
 			va_next = eva;
 
-		va = va_next;
-		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
-		    sva += PAGE_SIZE) {
-			if (*pte == 0) {
-				if (va != va_next) {
-					pmap_invalidate_range(pmap, va, sva);
-					va = va_next;
-				}
-				continue;
-			}
-			if ((*pte & PG_G) == 0)
-				anyvalid = 1;
-			else if (va == va_next)
-				va = sva;
-			if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free,
-			    &lock)) {
-				sva += PAGE_SIZE;
-				break;
-			}
-		}
-		if (va != va_next)
-			pmap_invalidate_range(pmap, va, sva);
+		if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
+			anyvalid = 1;
 	}
 	if (lock != NULL)
 		rw_wunlock(lock);
@@ -4304,7 +4336,7 @@
  */
 int
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    u_int flags, int8_t psind __unused)
+    u_int flags, int8_t psind)
 {
 	struct rwlock *lock;
 	pd_entry_t *pde;
@@ -4332,6 +4364,8 @@
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
+	KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
+	    ("pmap_enter: flags %u has reserved bits set", flags));
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
 	if ((flags & VM_PROT_WRITE) != 0)
@@ -4348,7 +4382,7 @@
 		newpte |= PG_U;
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
-	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
 
 	/*
 	 * Set modified bit gratuitously for writeable mappings if
@@ -4365,6 +4399,10 @@
 
 	lock = NULL;
 	PMAP_LOCK(pmap);
+	if (psind == 1) {
+		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
+		goto out;
+	}
 
 	/*
 	 * In the case that a page table page is not
@@ -4524,50 +4562,120 @@
 }
 
 /*
- * Tries to create a 2MB page mapping. Returns TRUE if successful and FALSE
- * otherwise. Fails if (1) a page table page cannot be allocated without
- * blocking, (2) a mapping already exists at the specified virtual address, or
- * (3) a pv entry cannot be allocated without reclaiming another pv entry.
+ * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
+ * if successful. Returns false if (1) a page table page cannot be allocated
+ * without sleeping, (2) a mapping already exists at the specified virtual
+ * address, or (3) a PV entry cannot be allocated without reclaiming another
+ * PV entry.
  */
-static boolean_t
-pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+static bool
+pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     struct rwlock **lockp)
 {
-	pd_entry_t *pde, newpde;
+	pd_entry_t newpde;
 	pt_entry_t PG_V;
-	vm_page_t mpde;
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PG_V = pmap_valid_bit(pmap);
+	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
+	    PG_PS | PG_V;
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpde |= PG_MANAGED;
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		newpde |= pg_nx;
+	if (va < VM_MAXUSER_ADDRESS)
+		newpde |= PG_U;
+	return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
+	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
+	    KERN_SUCCESS);
+}
+
+/*
+ * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
+ * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
+ * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
+ * a mapping already exists at the specified virtual address. Returns
+ * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
+ * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
+ * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
+ *
+ * The parameter "m" is only used when creating a managed, writeable mapping.
+ */
+static int
+pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
+    vm_page_t m, struct rwlock **lockp)
+{
 	struct spglist free;
+	pd_entry_t oldpde, *pde;
+	pt_entry_t PG_G, PG_RW, PG_V;
+	vm_page_t mt, pdpg;
 
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	KASSERT((va & PDRMASK) == 0, ("pmap_enter_pde: va unaligned"));
+	PG_G = pmap_global_bit(pmap);
+	PG_RW = pmap_rw_bit(pmap);
+	KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
+	    ("pmap_enter_pde: newpde is missing PG_M"));
 	PG_V = pmap_valid_bit(pmap);
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
-	if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) {
+	if ((pdpg = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
+	    NULL : lockp)) == NULL) {
 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
-		return (FALSE);
+		return (KERN_RESOURCE_SHORTAGE);
 	}
-	pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde));
+	pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
 	pde = &pde[pmap_pde_index(va)];
-	if ((*pde & PG_V) != 0) {
-		KASSERT(mpde->wire_count > 1,
-		    ("pmap_enter_pde: mpde's wire count is too low"));
-		mpde->wire_count--;
-		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
-		    " in pmap %p", va, pmap);
-		return (FALSE);
+	oldpde = *pde;
+	if ((oldpde & PG_V) != 0) {
+		KASSERT(pdpg->wire_count > 1,
+		    ("pmap_enter_pde: pdpg's wire count is too low"));
+		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
+			pdpg->wire_count--;
+			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+			    " in pmap %p", va, pmap);
+			return (KERN_FAILURE);
+		}
+		/* Break the existing mapping(s). */
+		SLIST_INIT(&free);
+		if ((oldpde & PG_PS) != 0) {
+			/*
+			 * The reference to the PD page that was acquired by
+			 * pmap_allocpde() ensures that it won't be freed.
+			 * However, if the PDE resulted from a promotion, then
+			 * a reserved PT page could be freed.
+			 */
+			(void)pmap_remove_pde(pmap, pde, va, &free, lockp);
+			if ((oldpde & PG_G) == 0)
+				pmap_invalidate_pde_page(pmap, va, oldpde);
+		} else {
+			pmap_delayed_invl_started();
+			if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
+			    lockp))
+				pmap_invalidate_all(pmap);
+			pmap_delayed_invl_finished();
+		}
+		pmap_free_zero_pages(&free);
+		if (va >= VM_MAXUSER_ADDRESS) {
+			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+			if (pmap_insert_pt_page(pmap, mt)) {
+				/*
+				 * XXX Currently, this can't happen because
+				 * we do not perform pmap_enter(psind == 1)
+				 * on the kernel pmap.
+				 */
+				panic("pmap_enter_pde: trie insert failed");
+			}
+		} else
+			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
+			    pde));
 	}
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
-	    PG_PS | PG_V;
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		newpde |= PG_MANAGED;
-
+	if ((newpde & PG_MANAGED) != 0) {
 		/*
 		 * Abort this mapping if its PV entry could not be created.
 		 */
-		if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
-		    lockp)) {
+		if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
 			SLIST_INIT(&free);
-			if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
+			if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
 				/*
 				 * Although "va" is not mapped, paging-
 				 * structure caches could nonetheless have
@@ -4579,17 +4687,19 @@
 			}
 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 			    " in pmap %p", va, pmap);
-			return (FALSE);
+			return (KERN_RESOURCE_SHORTAGE);
 		}
+		if ((newpde & PG_RW) != 0) {
+			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+				vm_page_aflag_set(mt, PGA_WRITEABLE);
+		}
 	}
-	if ((prot & VM_PROT_EXECUTE) == 0)
-		newpde |= pg_nx;
-	if (va < VM_MAXUSER_ADDRESS)
-		newpde |= PG_U;
 
 	/*
 	 * Increment counters.
 	 */
+	if ((newpde & PG_W) != 0)
+		pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
 	pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
 
 	/*
@@ -4601,7 +4711,7 @@
 	atomic_add_long(&pmap_pde_mappings, 1);
 	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 	    " in pmap %p", va, pmap);
-	return (TRUE);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -4636,7 +4746,7 @@
 		va = start + ptoa(diff);
 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
-		    pmap_enter_pde(pmap, va, m, prot, &lock))
+		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
 			m = &m[NBPDR / PAGE_SIZE - 1];
 		else
 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
@@ -5069,8 +5179,8 @@
 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
 			pde = &pde[pmap_pde_index(addr)];
 			if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
-			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
-			    PG_PS_FRAME, &lock))) {
+			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
+			    PMAP_ENTER_NORECLAIM, &lock))) {
 				*pde = srcptepaddr & ~PG_W;
 				pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
 				atomic_add_long(&pmap_pde_mappings, 1);
Index: vm/pmap.h
===================================================================
--- vm/pmap.h
+++ vm/pmap.h
@@ -100,9 +100,11 @@
 /*
  * Flags for pmap_enter(). The bits in the low-order byte are reserved
  * for the protection code (vm_prot_t) that describes the fault type.
+ * Bits 24 through 31 are reserved for the pmap's internal use.
  */
-#define	PMAP_ENTER_NOSLEEP	0x0100
-#define	PMAP_ENTER_WIRED	0x0200
+#define	PMAP_ENTER_NOSLEEP	0x00000100
+#define	PMAP_ENTER_WIRED	0x00000200
+#define	PMAP_ENTER_RESERVED	0xFF000000
 
 /*
  * Define the maximum number of machine-dependent reference bits that are
Index: vm/vm_fault.c
===================================================================
--- vm/vm_fault.c
+++ vm/vm_fault.c
@@ -266,8 +266,11 @@
 vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
     int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
 {
-	vm_page_t m;
-	int rv;
+	vm_page_t m, m_map;
+#if defined(__amd64__) && VM_NRESERVLEVEL > 0
+	vm_page_t m_super;
+#endif
+	int flags, psind, rv;
 
 	MPASS(fs->vp == NULL);
 	m = vm_page_lookup(fs->first_object, fs->first_pindex);
@@ -275,14 +278,39 @@
 	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
 	    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
 		return (KERN_FAILURE);
-	rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
-	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), 0);
+	m_map = m;
+	psind = 0;
+#if defined(__amd64__) && VM_NRESERVLEVEL > 0
+	if ((m_super = vm_reserv_to_superpage(m)) != NULL &&
+	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
+	    roundup2(vaddr, pagesizes[m_super->psind]) <= fs->entry->end) {
+		MPASS(m_super->object == fs->first_object);
+		flags = PS_ALL_VALID;
+		if ((prot & VM_PROT_WRITE) != 0) {
+			flags |= PS_NONE_BUSY;
+			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
+				flags |= PS_ALL_DIRTY;
+		}
+		if (vm_page_ps_test(m_super, flags, m)) {
+			m_map = m_super;
+			psind = m_super->psind;
+			vaddr = rounddown2(vaddr, pagesizes[psind]);
+			/* Preset the modified bit for dirty superpages. */
+			if ((flags & PS_ALL_DIRTY) != 0)
+				fault_type |= VM_PROT_WRITE;
+		}
+	}
+#endif
+	rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
+	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
 	if (rv != KERN_SUCCESS)
		return (rv);
 	vm_fault_fill_hold(m_hold, m);
-	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
+	if (psind == 0)
+		vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
+		    false);
 	VM_OBJECT_RUNLOCK(fs->first_object);
-	if (!wired)
+	if (!wired && psind == 0)
 		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
 	vm_map_lookup_done(fs->map, fs->entry);
 	curthread->td_ru.ru_minflt++;
Index: vm/vm_map.c
===================================================================
--- vm/vm_map.c
+++ vm/vm_map.c
@@ -1962,7 +1962,7 @@
 		    (pagesizes[p->psind] - 1)) == 0) {
 			mask = atop(pagesizes[p->psind]) - 1;
 			if (tmpidx + mask < psize &&
-			    vm_page_ps_is_valid(p)) {
+			    vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
 				p += mask;
 				threshold += mask;
 			}
Index: vm/vm_page.h
===================================================================
--- vm/vm_page.h
+++ vm/vm_page.h
@@ -438,6 +438,18 @@
 }
 #endif
 
+/*
+ * Predicates supported by vm_page_ps_test():
+ *
+ *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
+ *	However, it can be spuriously false when the (super)page has become
+ *	dirty in the pmap but that information has not been propagated to the
+ *	machine-independent layer.
+ */
+#define	PS_ALL_DIRTY	0x1
+#define	PS_ALL_VALID	0x2
+#define	PS_NONE_BUSY	0x4
+
 void vm_page_busy_downgrade(vm_page_t m);
 void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
 void vm_page_flash(vm_page_t m);
@@ -469,7 +481,7 @@
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
 struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
 vm_page_t vm_page_prev(vm_page_t m);
-boolean_t vm_page_ps_is_valid(vm_page_t m);
+bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
 void vm_page_readahead_finish(vm_page_t m);
 bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -3472,12 +3472,11 @@
 }
 
 /*
- * vm_page_ps_is_valid:
- *
- *	Returns TRUE if the entire (super)page is valid and FALSE otherwise.
+ * Returns true if all of the specified predicates are true for the entire
+ * (super)page and false otherwise.
  */
-boolean_t
-vm_page_ps_is_valid(vm_page_t m)
+bool
+vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
 {
 	int i, npages;
 
@@ -3490,10 +3489,25 @@
 	 * occupy adjacent entries in vm_page_array[].
 	 */
 	for (i = 0; i < npages; i++) {
-		if (m[i].valid != VM_PAGE_BITS_ALL)
-			return (FALSE);
+		if (&m[i] == skip_m)
+			continue;
+		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
+			return (false);
+		if ((flags & PS_ALL_DIRTY) != 0) {
+			/*
+			 * Calling vm_page_test_dirty() or pmap_is_modified()
+			 * might stop this case from spuriously returning
+			 * "false". However, that would require a write lock
+			 * on the object containing "m[i]".
+			 */
+			if (m[i].dirty != VM_PAGE_BITS_ALL)
+				return (false);
+		}
+		if ((flags & PS_ALL_VALID) != 0 &&
+		    m[i].valid != VM_PAGE_BITS_ALL)
+			return (false);
 	}
-	return (TRUE);
+	return (true);
 }
 
 /*
Index: vm/vm_reserv.h
===================================================================
--- vm/vm_reserv.h
+++ vm/vm_reserv.h
@@ -64,6 +64,7 @@
 int		vm_reserv_size(int level);
 vm_paddr_t	vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end,
 		    vm_paddr_t high_water);
+vm_page_t	vm_reserv_to_superpage(vm_page_t m);
 
 #endif	/* VM_NRESERVLEVEL > 0 */
 #endif	/* _KERNEL */
Index: vm/vm_reserv.c
===================================================================
--- vm/vm_reserv.c
+++ vm/vm_reserv.c
@@ -1121,4 +1121,16 @@
 	return (new_end);
 }
 
+/*
+ * Returns the superpage containing the given page.
+ */
+vm_page_t
+vm_reserv_to_superpage(vm_page_t m)
+{
+	vm_reserv_t rv;
+
+	rv = vm_reserv_from_page(m);
+	return (rv->popcnt == VM_LEVEL_0_NPAGES ? rv->pages : NULL);
+}
+
 #endif	/* VM_NRESERVLEVEL > 0 */
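
For reference, below is a minimal sketch of the call pattern that the widened pmap_enter() interface is meant to support. The helper name, the alignment check, and the small-page fallback are illustrative assumptions, not part of the diff; in the diff itself the only caller that passes psind == 1 is vm_fault_soft_fast(), and pagesizes[1] is assumed to be the 2MB superpage size.

/*
 * Illustrative sketch only: ask the pmap for a 2MB mapping and fall back to
 * a base-page mapping of the same address if the resources for the large
 * mapping are not immediately available.  "m" is assumed to be the first
 * page of a fully valid, superpage-aligned run backed by one reservation.
 */
static int
example_enter_superpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{
	int rv;

	KASSERT((va & (pagesizes[1] - 1)) == 0,
	    ("example_enter_superpage: va %#lx is not superpage aligned", va));
	/* psind == 1 routes the request through pmap_enter_pde(). */
	rv = pmap_enter(pmap, va, m, prot, prot | PMAP_ENTER_NOSLEEP, 1);
	if (rv != KERN_RESOURCE_SHORTAGE)
		return (rv);
	/* Retry with a single base page at the same address. */
	return (pmap_enter(pmap, va, m, prot, prot | PMAP_ENTER_NOSLEEP, 0));
}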