Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1615,6 +1615,35 @@
 	return (nswapdev);
 }
 
+static void
+swp_pager_activate_vmpage(vm_page_t m)
+{
+
+	vm_object_pip_wakeup(m->object);
+	vm_page_dirty(m);
+#ifdef INVARIANTS
+	vm_page_lock(m);
+	if (m->wire_count == 0 && m->queue == PQ_NONE)
+		panic("page %p is neither wired nor queued", m);
+	vm_page_unlock(m);
+#endif
+	vm_page_xunbusy(m);
+	vm_pager_page_unswapped(m);
+}
+
+static void
+swp_pager_launder_vmpage(vm_page_t m)
+{
+
+	vm_object_pip_wakeup(m->object);
+	vm_page_dirty(m);
+	vm_page_lock(m);
+	vm_page_launder(m);
+	vm_page_unlock(m);
+	vm_page_xunbusy(m);
+	vm_pager_page_unswapped(m);
+}
+
 /*
  * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
  *
@@ -1628,40 +1657,35 @@
  *	We also attempt to swap in all other pages in the swap block.
  *	However, we only guarantee that the one at the specified index is
  *	paged in.
- *
- *	XXX - The code to page the whole block in doesn't work, so we
- *	revert to the one-by-one behavior for now.  Sigh.
  */
-static inline void
+static int
 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 {
-	vm_page_t m;
+	vm_page_t m[SWB_NPAGES];
+	int count, i, j, npages, rv;
 
-	vm_object_pip_add(object, 1);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-	if (m->valid == VM_PAGE_BITS_ALL) {
-		vm_object_pip_wakeup(object);
-		vm_page_dirty(m);
-#ifdef INVARIANTS
-		vm_page_lock(m);
-		if (m->wire_count == 0 && m->queue == PQ_NONE)
-			panic("page %p is neither wired nor queued", m);
-		vm_page_unlock(m);
-#endif
-		vm_page_xunbusy(m);
-		vm_pager_page_unswapped(m);
-		return;
+	rv = swap_pager_haspage(object, pindex, NULL, &npages);
+	KASSERT(rv == 1, ("%s: missing page %p", __func__, m));
+	npages += 1;
+	npages = vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, m, npages);
+	vm_object_pip_add(object, npages);
+
+	for (i = 0; i < npages;) {
+		if (m[i]->valid == VM_PAGE_BITS_ALL) {
+			swp_pager_activate_vmpage(m[i++]);
+		} else {
+			for (j = i + 1; j < npages; j++) {
+				if (m[j]->valid == VM_PAGE_BITS_ALL)
+					break;
+			}
+			count = j - i;
+			if (swap_pager_getpages(object, &m[i], count, NULL, NULL) != VM_PAGER_OK)
+				panic("swap_pager_force_pagein: read from swap failed");
+			for (; i < j; i++)
+				swp_pager_launder_vmpage(m[i]);
+		}
 	}
-
-	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
-		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
-	vm_object_pip_wakeup(object);
-	vm_page_dirty(m);
-	vm_page_lock(m);
-	vm_page_launder(m);
-	vm_page_unlock(m);
-	vm_page_xunbusy(m);
-	vm_pager_page_unswapped(m);
+	return (npages);
 }
 
 /*
@@ -1680,7 +1704,7 @@
 	struct swblk *sb;
 	vm_object_t object;
 	vm_pindex_t pi;
-	int i, retries;
+	int freedpages, i, offset, retries;
 
 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
 
@@ -1710,15 +1734,27 @@
 		if (object->type != OBJT_SWAP)
 			goto next_obj;
 
+		freedpages = 0;
 		for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 		    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
 			pi = sb->p + SWAP_META_PAGES;
-			for (i = 0; i < SWAP_META_PAGES; i++) {
-				if (sb->d[i] == SWAPBLK_NONE)
-					continue;
-				if (swp_pager_isondev(sb->d[i], sp))
-					swp_pager_force_pagein(object,
+			/*
+			 * The previous swp_pager_force_pagein() call already
+			 * paged in this whole block, so skip it.
+			 */
+			if (freedpages >= SWAP_META_PAGES) {
+				freedpages -= SWAP_META_PAGES;
+				continue;
+			}
+			for (i = freedpages, freedpages = 0; i < SWAP_META_PAGES;) {
+				if (swp_pager_isondev(sb->d[i], sp)) {
+					freedpages = swp_pager_force_pagein(object,
 					    sb->p + i);
+					offset = min(freedpages, SWAP_META_PAGES - i);
+					freedpages -= offset;
+					i += offset;
+				} else
+					i++;
			}
 		}
 next_obj:
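The least obvious part of the swapoff loop above is the freedpages bookkeeping: swp_pager_force_pagein() now pages in a whole cluster and reports how many pages it covered, and the caller skips the slots already handled, carrying any surplus across SWAP_META_PAGES-sized blocks. The standalone userspace sketch below models only that arithmetic under simplified assumptions; the block layout, the fixed cluster size, and the names force_pagein(), ondev, NBLOCKS, and MIN are illustrative and are not part of the kernel change.

#include <stdbool.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define SWAP_META_PAGES	8	/* slots per metadata block (illustrative) */
#define NBLOCKS		4

/* 1 = the page backing this slot lives on the device being swapped off. */
static const bool ondev[NBLOCKS][SWAP_META_PAGES] = {
	{ 1, 1, 1, 1, 0, 0, 1, 1 },
	{ 1, 1, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 1, 1, 1, 1, 1 },
};

/*
 * Stand-in for swp_pager_force_pagein(): pretend every call reads in a
 * fixed-size cluster starting at the requested slot and reports its size.
 */
static int
force_pagein(int blk, int slot)
{
	int npages = 6;		/* illustrative cluster size */

	printf("page in %d pages starting at block %d, slot %d\n",
	    npages, blk, slot);
	return (npages);
}

int
main(void)
{
	int blk, freedpages, i, offset;

	freedpages = 0;
	for (blk = 0; blk < NBLOCKS; blk++) {
		/* A previous call already covered this entire block. */
		if (freedpages >= SWAP_META_PAGES) {
			freedpages -= SWAP_META_PAGES;
			continue;
		}
		/* Resume after the slots the previous call already covered. */
		for (i = freedpages, freedpages = 0; i < SWAP_META_PAGES;) {
			if (ondev[blk][i]) {
				freedpages = force_pagein(blk, i);
				offset = MIN(freedpages, SWAP_META_PAGES - i);
				freedpages -= offset;
				i += offset;
			} else
				i++;
		}
	}
	return (0);
}

Running the sketch shows that slots covered by a carried-over cluster (for example the start of the second block) trigger no additional page-in calls, which is the behavior the patch relies on to avoid redundant swap reads.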