Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1853,59 +1853,56 @@
 static void
 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
 {
+	struct pctrie_iter pages;
 	vm_map_entry_t entry;
 	vm_object_t first_object;
 	vm_offset_t end, start;
-	vm_page_t m, m_next;
-	vm_pindex_t pend, pstart;
+	vm_page_t m;
 	vm_size_t size;
 
 	VM_OBJECT_ASSERT_UNLOCKED(fs->object);
 	first_object = fs->first_object;
 	/* Neither fictitious nor unmanaged pages can be reclaimed. */
-	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
-		VM_OBJECT_RLOCK(first_object);
-		size = VM_FAULT_DONTNEED_MIN;
-		if (MAXPAGESIZES > 1 && size < pagesizes[1])
-			size = pagesizes[1];
-		end = rounddown2(vaddr, size);
-		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
-		    (entry = fs->entry)->start < end) {
-			if (end - entry->start < size)
-				start = entry->start;
-			else
-				start = end - size;
-			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
-			pstart = OFF_TO_IDX(entry->offset) + atop(start -
-			    entry->start);
-			m_next = vm_page_find_least(first_object, pstart);
-			pend = OFF_TO_IDX(entry->offset) + atop(end -
-			    entry->start);
-			while ((m = m_next) != NULL && m->pindex < pend) {
-				m_next = TAILQ_NEXT(m, listq);
-				if (!vm_page_all_valid(m) ||
-				    vm_page_busied(m))
-					continue;
+	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) != 0)
+		return;
+	VM_OBJECT_RLOCK(first_object);
+	size = VM_FAULT_DONTNEED_MIN;
+	if (MAXPAGESIZES > 1 && size < pagesizes[1])
+		size = pagesizes[1];
+	end = rounddown2(vaddr, size);
+	if (vaddr - end < size - PAGE_SIZE - ptoa(ahead) ||
+	    (entry = fs->entry)->start >= end) {
+		/* Drop the read lock acquired above before bailing out. */
+		VM_OBJECT_RUNLOCK(first_object);
+		return;
+	}
+	if (end - entry->start < size)
+		start = entry->start;
+	else
+		start = end - size;
+	pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
+	vm_page_iter_limit_init(&pages, first_object,
+	    OFF_TO_IDX(entry->offset) + atop(end - entry->start));
+	VM_RADIX_FOREACH_FROM(m, &pages,
+	    OFF_TO_IDX(entry->offset) + atop(start - entry->start)) {
+		if (!vm_page_all_valid(m) ||
+		    vm_page_busied(m))
+			continue;
 
-			/*
-			 * Don't clear PGA_REFERENCED, since it would
-			 * likely represent a reference by a different
-			 * process.
-			 *
-			 * Typically, at this point, prefetched pages
-			 * are still in the inactive queue.  Only
-			 * pages that triggered page faults are in the
-			 * active queue.  The test for whether the page
-			 * is in the inactive queue is racy; in the
-			 * worst case we will requeue the page
-			 * unnecessarily.
-			 */
-			if (!vm_page_inactive(m))
-				vm_page_deactivate(m);
-			}
-		}
-		VM_OBJECT_RUNLOCK(first_object);
+		/*
+		 * Don't clear PGA_REFERENCED, since it would likely represent a
+		 * reference by a different process.
+		 *
+		 * Typically, at this point, prefetched pages are still in the
+		 * inactive queue.  Only pages that triggered page faults are in
+		 * the active queue.  The test for whether the page is in the
+		 * inactive queue is racy; in the worst case we will requeue the
+		 * page unnecessarily.
+		 */
+		if (!vm_page_inactive(m))
+			vm_page_deactivate(m);
 	}
+	VM_OBJECT_RUNLOCK(first_object);
 }
 
 /*