Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -4095,29 +4095,30 @@
 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
+	struct pctrie_iter pages;
 	vm_offset_t va;
 	vm_page_t m, mpte;
-	vm_pindex_t diff, psize;
 	int rv;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
-	psize = atop(end - start);
 	mpte = NULL;
-	m = m_start;
+	vm_page_iter_limit_init(&pages, m_start->object,
+	    m_start->pindex + atop(end - start));
+	m = vm_radix_iter_lookup(&pages, m_start->pindex);
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-		va = start + ptoa(diff);
+	while (m != NULL) {
+		va = start + ptoa(m->pindex - m_start->pindex);
 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 		    m->psind == 1 && pg_ps_enabled &&
 		    ((rv = pmap_enter_4mpage(pmap, va, m, prot)) ==
-		    KERN_SUCCESS || rv == KERN_NO_SPACE))
-			m = &m[NBPDR / PAGE_SIZE - 1];
-		else
-			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
-			    mpte);
-		m = TAILQ_NEXT(m, listq);
+		    KERN_SUCCESS || rv == KERN_NO_SPACE)) {
+			m = vm_radix_iter_jump(&pages, NBPDR / PAGE_SIZE);
+		} else {
+			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+			m = vm_radix_iter_step(&pages);
+		}
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
@@ -4285,6 +4286,7 @@
 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
 {
+	struct pctrie_iter pages;
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p;
@@ -4297,7 +4299,8 @@
 	    (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
-		p = vm_page_lookup(object, pindex);
+		vm_page_iter_init(&pages, object);
+		p = vm_radix_iter_lookup(&pages, pindex);
 		KASSERT(vm_page_all_valid(p),
 		    ("pmap_object_init_pt: invalid page %p", p));
 		pat_mode = p->md.pat_mode;
@@ -4315,15 +4318,14 @@
 		 * the pages are not physically contiguous or have differing
 		 * memory attributes.
 		 */
-		p = TAILQ_NEXT(p, listq);
 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 		    pa += PAGE_SIZE) {
+			p = vm_radix_iter_next(&pages);
 			KASSERT(vm_page_all_valid(p),
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
 			    pat_mode != p->md.pat_mode)
				return;
-			p = TAILQ_NEXT(p, listq);
 		}
 
 		/*
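
Note (not part of the patch): a minimal sketch of the lookup/step iteration
pattern this change adopts, using only the iterator functions visible in the
diff (vm_page_iter_limit_init, vm_radix_iter_lookup, vm_radix_iter_step).
The function name, the process_page() callback, and the header locations are
hypothetical placeholders; the object/pmap locking done by the real pmap
callers is omitted.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>	/* assumed home of the vm_radix_iter_* calls */

static void
iterate_resident_pages(vm_object_t object, vm_pindex_t first, vm_pindex_t npages)
{
	struct pctrie_iter pages;
	vm_page_t m;

	/*
	 * Bind the iterator to the object's page radix trie and cap the
	 * range, mirroring how the patch replaces the old psize bound.
	 */
	vm_page_iter_limit_init(&pages, object, first + npages);

	/*
	 * Look up the page at the starting index, then step through the
	 * resident pages until the iterator runs out (returns NULL), as
	 * pmap_enter_object() now does instead of walking m->listq.
	 */
	for (m = vm_radix_iter_lookup(&pages, first); m != NULL;
	    m = vm_radix_iter_step(&pages))
		process_page(m);	/* hypothetical per-page work */
}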